def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of integer rows."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build one row of the triangle from the row above it."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Each interior element is the sum of the two elements directly above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create the triangle row by row, computing only the first half of each
    symmetric row and mirroring it."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both generators with timeit for a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
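
# Illustrative sanity check (an added sketch, not part of the original module):
# row n of the triangle holds the binomial coefficients C(n, k), and both
# generators must agree on it.
assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4) == [
    [1],
    [1, 1],
    [1, 2, 1],
    [1, 3, 3, 1],
]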
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline (SAM-style): points are batched into
    chunks, masks are predicted per chunk, and the chunks are merged in
    postprocessing."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
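
# Illustrative sketch (an addition): with the _LazyModule indirection above,
# importing the package does not pull in the heavy torch modules until an
# attribute is actually accessed, e.g.:
#
#     import transformers
#     model_cls = transformers.PegasusXModel  # triggers the lazy submodule import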
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Any = RoCBertTokenizer
__lowercase : List[str] = None
__lowercase : Union[str, Any] = False
__lowercase : Optional[Any] = True
__lowercase : int = filter_non_english
def snake_case_ ( self):
super().setUp()
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
for i, value in enumerate(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""])
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__)
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""你好[SEP]你是谁""")
self.assertListEqual(lowerCAmelCase__ , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCAmelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCAmelCase__) , [5, 6, 2, 5, 7, 8])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def snake_case_ ( self):
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def snake_case_ ( self):
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def snake_case_ ( self):
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
if self.test_rust_tokenizer:
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
def snake_case_ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase__ , """do_lower_case""") else False
__SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
__SCREAMING_SNAKE_CASE = """""".join(lowerCAmelCase__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__)
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase__)
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""你好""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""你是谁""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
__SCREAMING_SNAKE_CASE = """你好,你是谁"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
"""simple docstring"""
from __future__ import annotations
_A = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_A = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a__ ( lowerCAmelCase ) -> list[float]:
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Any = len(lowerCAmelCase )
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : float = -1
for j in range(i + 1 , lowerCAmelCase ):
if arr[i] < arr[j]:
UpperCAmelCase__ : Union[str, Any] = arr[j]
break
result.append(lowerCAmelCase )
return result
def a__ ( lowerCAmelCase ) -> list[float]:
UpperCAmelCase__ : List[str] = []
for i, outer in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCAmelCase__ : int = inner
break
result.append(lowerCAmelCase )
return result
def a__ ( lowerCAmelCase ) -> list[float]:
UpperCAmelCase__ : Union[str, Any] = len(lowerCAmelCase )
UpperCAmelCase__ : list[float] = []
UpperCAmelCase__ : list[float] = [-1] * arr_size
for index in reversed(range(lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCAmelCase__ : int = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_A = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
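
# Illustrative check (an added sketch): for each element the result holds the
# next element to its right that is strictly greater, or -1 if none exists,
# and all three implementations agree.
sample = [2, 7, 3, 5, 4, 6, 8]
assert (
    next_greatest_element_slow(sample)
    == next_greatest_element_fast(sample)
    == next_greatest_element(sample)
    == [7, 8, 5, 6, 6, 8, -1]
)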
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : str = {}
def _a (self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = super().add_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def _a (self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=1 , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = []
if num_vec_per_token == 1:
self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
output.append(_lowerCamelCase )
else:
UpperCAmelCase__ : Any = []
for i in range(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = placeholder_token + F"""_{i}"""
self.try_adding_tokens(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
output.append(_lowerCamelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
UpperCAmelCase__ : Dict = output
def _a (self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 ):
"""simple docstring"""
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : str = []
for i in range(len(_lowerCamelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=_lowerCamelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
UpperCAmelCase__ : List[str] = self.token_map[placeholder_token]
UpperCAmelCase__ : Any = tokens[: 1 + int(len(_lowerCamelCase ) * prop_tokens_to_load )]
if vector_shuffle:
UpperCAmelCase__ : Any = copy.copy(_lowerCamelCase )
random.shuffle(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = text.replace(_lowerCamelCase , """ """.join(_lowerCamelCase ) )
return text
def __call__(self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 , **_lowerCamelCase ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
_lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , )
def _a (self , _lowerCamelCase , *_lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=1.0 , **_lowerCamelCase ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
_lowerCamelCase , vector_shuffle=_lowerCamelCase , prop_tokens_to_load=_lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase , )
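

if __name__ == "__main__":
    # Illustrative usage sketch (an addition; the checkpoint id is an example):
    # a placeholder such as "<cat-toy>" is expanded to "<cat-toy>_0 ... _3"
    # before the normal CLIP encoding runs.
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
    ids = tokenizer.encode("a photo of <cat-toy>", add_special_tokens=False)
    print(tokenizer.convert_ids_to_tokens(ids))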
"""simple docstring"""
import torch
from torch import nn
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 , _UpperCAmelCase=False ):
super().__init__()
snake_case_ = n_token
snake_case_ = d_embed
snake_case_ = d_proj
snake_case_ = cutoffs + [n_token]
snake_case_ = [0] + self.cutoffs
snake_case_ = div_val
snake_case_ = self.cutoffs[0]
snake_case_ = len(self.cutoffs ) - 1
snake_case_ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
snake_case_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
snake_case_ = nn.Parameter(torch.zeros(self.n_clusters ) )
snake_case_ = nn.ModuleList()
snake_case_ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase ) ) )
else:
self.out_projs.append(_UpperCAmelCase )
self.out_layers.append(nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase ) ) )
self.out_layers.append(nn.Linear(_UpperCAmelCase , r_idx - l_idx ) )
snake_case_ = keep_order
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if proj is None:
snake_case_ = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
snake_case_ = nn.functional.linear(_UpperCAmelCase , proj.t().contiguous() )
snake_case_ = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False ):
if labels is not None:
# Shift so that tokens < n predict n
snake_case_ = hidden[..., :-1, :].contiguous()
snake_case_ = labels[..., 1:].contiguous()
snake_case_ = hidden.view(-1 , hidden.size(-1 ) )
snake_case_ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
snake_case_ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
snake_case_ = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
snake_case_ = labels != -1_00
snake_case_ = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
snake_case_ = (
-nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
snake_case_ = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
snake_case_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
snake_case_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ = self.out_layers[0].weight[l_idx:r_idx]
snake_case_ = self.out_layers[0].bias[l_idx:r_idx]
else:
snake_case_ = self.out_layers[i].weight
snake_case_ = self.out_layers[i].bias
if i == 0:
snake_case_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
snake_case_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
snake_case_ = weights[0], biases[0], self.out_projs[0]
snake_case_ = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
if labels is None:
snake_case_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
snake_case_ = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
snake_case_ = 0
snake_case_ = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
snake_case_ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
snake_case_ = (labels >= l_idx) & (labels < r_idx)
snake_case_ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
snake_case_ = labels.index_select(0 , _UpperCAmelCase ) - l_idx
snake_case_ = head_logprob.index_select(0 , _UpperCAmelCase )
snake_case_ = hidden.index_select(0 , _UpperCAmelCase )
else:
snake_case_ = hidden
if i == 0:
if labels is not None:
snake_case_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
snake_case_ = head_logprob[:, : self.cutoffs[0]]
else:
snake_case_ = weights[i], biases[i], self.out_projs[i]
snake_case_ = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
snake_case_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
snake_case_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
snake_case_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
snake_case_ = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if self.n_clusters == 0:
snake_case_ = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
snake_case_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
snake_case_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ = self.out_layers[0].weight[l_idx:r_idx]
snake_case_ = self.out_layers[0].bias[l_idx:r_idx]
else:
snake_case_ = self.out_layers[i].weight
snake_case_ = self.out_layers[i].bias
if i == 0:
snake_case_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
snake_case_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_UpperCAmelCase )
biases.append(_UpperCAmelCase )
snake_case_ = weights[0], biases[0], self.out_projs[0]
snake_case_ = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
snake_case_ = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
snake_case_ = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase ) - 1 ):
snake_case_ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
snake_case_ = head_logprob[:, : self.cutoffs[0]]
else:
snake_case_ = weights[i], biases[i], self.out_projs[i]
snake_case_ = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = nn.functional.log_softmax(_UpperCAmelCase , dim=1 )
snake_case_ = head_logprob[:, -i] + tail_logprob_i
snake_case_ = logprob_i
return out
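

if __name__ == "__main__":
    # Illustrative sketch (an addition; vocab size and cutoffs are arbitrary).
    # With labels, forward returns per-position negative log-likelihoods after
    # the usual shift-by-one, so 7 input positions yield 6 losses per sequence.
    vocab_size, d_embed, d_proj = 1000, 32, 32
    crit = ProjectedAdaptiveLogSoftmax(vocab_size, d_embed, d_proj, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(4, 7, d_proj)             # [bsz, seq_len, d_proj]
    labels = torch.randint(0, vocab_size, (4, 7))  # [bsz, seq_len]
    nll = crit(hidden, labels)
    print(nll.shape)  # torch.Size([24]) == 4 sequences * 6 shifted positions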
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes (free, in-bounds cells)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
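
# Illustrative usage sketch (an addition, mirroring the standard config pattern):
#
#     from transformers import ResNetConfig, ResNetModel
#
#     configuration = ResNetConfig()      # ResNet-50 style defaults
#     model = ResNetModel(configuration)  # randomly initialized model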
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
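
# Illustrative check of shift_tokens_right (an addition; values are arbitrary):
# the decoder start id is prepended and any -100 label padding that survives
# the shift is replaced by pad_token_id.
#
#     example = jnp.array([[5, -100, 6]])
#     shift_tokens_right(example, pad_token_id=0, decoder_start_token_id=2)
#     # -> [[2, 5, 0]]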
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
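
# Illustrative usage sketch of the API under test (an addition; paths are
# examples): the Extractor facade infers the compression format from the
# file's magic number and dispatches to the matching extractor, which is
# exactly what test_extractor exercises above.
#
#     from datasets.utils.extract import Extractor
#
#     fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#     if fmt is not None:
#         Extractor.extract("archive.tar.gz", "extracted", fmt)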
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCAmelCase : Optional[Any] = 50003
lowerCAmelCase : List[str] = 50002
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : int = PLBartTokenizer
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : str):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: Tuple = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
SCREAMING_SNAKE_CASE_: int = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_: Tuple = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 4 , lowerCAmelCase__)]
self.assertListEqual(lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "<mask>"])
SCREAMING_SNAKE_CASE_: Optional[int] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer(lowerCAmelCase__).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = PLBartTokenizer(lowerCAmelCase__ , language_codes="multi" , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_: int = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 7 , lowerCAmelCase__)]
self.assertListEqual(
lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
SCREAMING_SNAKE_CASE_: str = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = '''uclanlp/plbart-python-en_XX'''
_UpperCAmelCase : List[str] = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
_UpperCAmelCase : int = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
_UpperCAmelCase : Optional[Any] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict):
SCREAMING_SNAKE_CASE_: PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX")
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
return cls
def _SCREAMING_SNAKE_CASE ( self : List[str]):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0003)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Dict = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids)
SCREAMING_SNAKE_CASE_: Optional[Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE_: int = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , lowerCAmelCase__)
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [5_0004, 5_0001])
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = PLBartTokenizer.from_pretrained(lowerCAmelCase__)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase__)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
SCREAMING_SNAKE_CASE_: Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt")
SCREAMING_SNAKE_CASE_: Any = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt")
SCREAMING_SNAKE_CASE_: List[Any] = targets["input_ids"]
SCREAMING_SNAKE_CASE_: List[Any] = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java")
self.assertEqual(
nested_simplify(lowerCAmelCase__) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 5_0003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0001,
} , )
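# Hedged usage sketch (added): the private _build_translation_inputs call above
# maps onto the public tokenizer API roughly as follows; the checkpoint name is
# illustrative, and the language codes follow the test constants above.
#
#   from transformers import PLBartTokenizer
#   tok = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-base", src_lang="en_XX", tgt_lang="java"
#   )
#   batch = tok("A test", return_tensors="pt")
#   # input_ids end with [eos_token_id, <source language code>], matching the
#   # suffix_tokens assertion above; the target language id is used as
#   # forced_bos_token_id when generating.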
| 127
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase ( A_ ):
A__ : List[Any] = "canine"
def __init__(self : Dict , snake_case__ : Dict=7_68 , snake_case__ : Tuple=12 , snake_case__ : Optional[int]=12 , snake_case__ : Optional[Any]=30_72 , snake_case__ : List[Any]="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[str]=1_63_84 , snake_case__ : List[Any]=16 , snake_case__ : List[Any]=0.02 , snake_case__ : Tuple=1e-12 , snake_case__ : Tuple=0 , snake_case__ : Optional[int]=0XE_0_0_0 , snake_case__ : Dict=0XE_0_0_1 , snake_case__ : int=4 , snake_case__ : Union[str, Any]=4 , snake_case__ : Union[str, Any]=8 , snake_case__ : List[str]=1_63_84 , snake_case__ : List[str]=1_28 , **snake_case__ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
snake_case : Any = max_position_embeddings
snake_case : List[Any] = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : Tuple = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : List[str] = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : str = initializer_range
snake_case : int = type_vocab_size
snake_case : List[Any] = layer_norm_eps
# Character config:
snake_case : str = downsampling_rate
snake_case : Dict = upsampling_kernel_size
snake_case : List[str] = num_hash_functions
snake_case : Optional[int] = num_hash_buckets
snake_case : Dict = local_transformer_stride
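# Hedged usage sketch (added): under the published transformers API this
# configuration corresponds to CanineConfig; the keyword names below are
# inferred from the assignments above and match the public defaults.
#
#   from transformers import CanineConfig, CanineModel
#   config = CanineConfig(hidden_size=768, num_hidden_layers=12,
#                         downsampling_rate=4, num_hash_functions=8)
#   model = CanineModel(config)  # character-level: no tokenizer vocabulary needed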
| 59
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase :str = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
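# Hedged sketch (added) of what the _LazyModule indirection above achieves:
# importing the package stays cheap, and heavy submodules load only on first
# attribute access. A minimal stand-alone equivalent using PEP 562:
#
#   import importlib
#
#   _import_structure = {"tokenization_squeezebert": ["SqueezeBertTokenizer"]}
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               module = importlib.import_module(f".{submodule}", __name__)
#               return getattr(module, name)
#       raise AttributeError(name)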
| 263
| 0
|
import heapq
import sys
import numpy as np
__A : int = tuple[int, int]
class __A :
def __init__( self : Optional[int] ):
lowerCAmelCase : Tuple = []
lowerCAmelCase : List[Any] = set()
def lowercase__ ( self : Dict ):
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def lowercase__ ( self : str ):
return len(self.elements ) == 0
def lowercase__ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(UpperCAmelCase_ )
else:
# update
# print("update", item)
temp = []
pri , x = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
pri , x = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
if item in self.set:
self.set.remove(UpperCAmelCase_ )
temp = []
pro , x = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
pro , x = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowercase__ ( self : Optional[Any] ):
return self.elements[0][1]
def lowercase__ ( self : int ):
priority , item = heapq.heappop(self.elements )
self.set.remove(item )
return (priority, item)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
a = np.array(_UpperCAmelCase )
b = np.array(_UpperCAmelCase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return consistent_heuristic(_UpperCAmelCase, _UpperCAmelCase ) // t
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
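# Added note: under the identifier renaming above, the three heuristics are
# Euclidean distance, Euclidean distance scaled down by t, and Manhattan
# distance. Example values for p = (0, 0), goal = (3, 4):
#   Euclidean: sqrt(3**2 + 4**2) = 5.0
#   Manhattan: abs(0 - 3) + abs(0 - 4) = 7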
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : List[str] = g_function[start] + Wa * heuristics[i](_UpperCAmelCase, _UpperCAmelCase )
return ans
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = np.chararray((n, n) )
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
lowerCAmelCase : Optional[Any] = '*'
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (j, (n - 1) - i) in blocks:
lowerCAmelCase : Tuple = '#'
lowerCAmelCase : List[Any] = '-'
lowerCAmelCase : Optional[Any] = back_pointer[goal]
while x != start:
x_c , y_c = x
# print(x)
lowerCAmelCase : Tuple = '-'
lowerCAmelCase : str = back_pointer[x]
lowerCAmelCase : Optional[int] = '-'
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j], end=' ' )
print('<-- End position', end=' ' )
else:
print(grid[i][j], end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowerCAmelCase : Dict = back_pointer[goal]
while x != start:
print(_UpperCAmelCase, end=' ' )
lowerCAmelCase : List[Any] = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ) -> int:
'''simple docstring'''
for itera in range(_UpperCAmelCase ):
open_list[itera].remove_element(_UpperCAmelCase )
# print("s", s)
# print("j", j)
x , y = s
lowerCAmelCase : List[Any] = (x - 1, y)
lowerCAmelCase : Optional[Any] = (x + 1, y)
lowerCAmelCase : Optional[int] = (x, y + 1)
lowerCAmelCase : str = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Optional[int] = float('inf' )
if valid(_UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
lowerCAmelCase : Any = g_function[s] + 1
lowerCAmelCase : Optional[Any] = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCAmelCase, key(_UpperCAmelCase, 0, _UpperCAmelCase, _UpperCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1, _UpperCAmelCase ):
if key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) <= Wa * key(
_UpperCAmelCase, 0, _UpperCAmelCase, _UpperCAmelCase ):
open_list[j].put(
_UpperCAmelCase, key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = []
for x in range(1, 5 ):
for y in range(1, 6 ):
some_list.append((x, y) )
for x in range(15, 20 ):
some_list.append((x, 17) )
for x in range(10, 19 ):
for y in range(1, 15 ):
some_list.append((x, y) )
# L block
for x in range(1, 4 ):
for y in range(12, 19 ):
some_list.append((x, y) )
for x in range(3, 13 ):
for y in range(16, 19 ):
some_list.append((x, y) )
return some_list
__A : Any = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__A : str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__A : Any = make_common_ground()
__A : List[Any] = blocks_blk
# hyper parameters
__A : Union[str, Any] = 1
__A : Dict = 1
__A : str = 20
__A : int = 3 # one consistent and two other inconsistent
# start and end destination
__A : str = (0, 0)
__A : List[str] = (n - 1, n - 1)
__A : int = 1
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
g_function = {start: 0, goal: float('inf' )}
back_pointer = {start: -1, goal: -1}
open_list = []
visited = set()
for i in range(_UpperCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCAmelCase, key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) )
close_list_anchor : list[int] = []
close_list_inad : list[int] = []
while open_list[0].minkey() < float('inf' ):
for i in range(1, _UpperCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
else:
lowerCAmelCase , lowerCAmelCase : int = open_list[i].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, )
close_list_inad.append(_UpperCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
else:
lowerCAmelCase : Tuple = open_list[0].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase, 0, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, )
close_list_anchor.append(_UpperCAmelCase )
print('No path found to goal' )
print()
for i in range(n - 1, -1, -1 ):
for j in range(_UpperCAmelCase ):
if (j, i) in blocks:
print('#', end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*', end=' ' )
else:
print('-', end=' ' )
else:
print('*', end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position', end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
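# Hedged stand-alone sketch (added, not part of the file above) of the core
# multi-heuristic A* control rule used in multi_a_star: an inadmissible queue i
# is expanded only while its best key stays within a factor W2 of the anchor
# (consistent-heuristic) queue's best key, which bounds suboptimality by W1 * W2.

def should_expand_inadmissible(min_key_i: float, min_key_anchor: float, w2: float) -> bool:
    # Mirrors `open_list[i].minkey() <= Wa * open_list[0].minkey()` above,
    # where the renamed `Wa` at that point plays the role of W2.
    return min_key_i <= w2 * min_key_anchor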
| 323
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(pi, 2 ) * torus_radius * tube_radius  # 4 * pi**2 * R * r, by Pappus's theorem
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
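# Added check: for the 3-4-5 right triangle the semi-perimeter is s = 6, and
# sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) = sqrt(36) = 6.0, which matches
# base * height / 2 = 3 * 4 / 2.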
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 1
|
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
lowercase : List[str] = cva.getAffineTransform(_UpperCAmelCase , _UpperCAmelCase )
return cva.warpAffine(_UpperCAmelCase , _UpperCAmelCase , (rows, cols) )
if __name__ == "__main__":
# read original image
_UpperCamelCase: List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
_UpperCamelCase: List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
img_rows , img_cols = gray_img.shape
# set different points to rotate image
_UpperCamelCase: str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
_UpperCamelCase: Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
_UpperCamelCase: str = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
_UpperCamelCase: Optional[Any] = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
_UpperCamelCase: int = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
_UpperCamelCase: str = plt.figure(1)
_UpperCamelCase: List[Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
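# Hedged numpy-only sketch (added) of what the getAffineTransform call above
# computes: the 2x3 matrix M such that [x', y'] = M @ [x, y, 1] for each of the
# three source/destination point pairs (three pairs give an exactly determined
# 6-equation linear system).
def affine_from_three_points(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    a = np.hstack([src, np.ones((3, 1), dtype=src.dtype)])  # rows [x, y, 1]
    m = np.linalg.solve(a, dst)  # 3x2 solution, so that a @ m == dst
    return m.T  # 2x3, matching OpenCV's layout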
| 255
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a__ :
def __init__( self : str, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Optional[Any]=13, lowerCAmelCase : str=7, lowerCAmelCase : Union[str, Any]=True, lowerCAmelCase : Optional[int]=True, lowerCAmelCase : Dict=True, lowerCAmelCase : List[str]=True, lowerCAmelCase : List[Any]=99, lowerCAmelCase : Tuple=32, lowerCAmelCase : int=2, lowerCAmelCase : Dict=4, lowerCAmelCase : List[str]=37, lowerCAmelCase : Any="gelu", lowerCAmelCase : Optional[int]=0.1, lowerCAmelCase : Tuple=0.1, lowerCAmelCase : Optional[int]=512, lowerCAmelCase : Dict=16, lowerCAmelCase : Tuple=2, lowerCAmelCase : Union[str, Any]=0.02, lowerCAmelCase : str=3, lowerCAmelCase : Any=4, lowerCAmelCase : List[str]=None, lowerCAmelCase : Union[str, Any]=1000, ) -> Dict:
lowercase : Optional[Any] = parent
lowercase : Tuple = batch_size
lowercase : List[Any] = seq_length
lowercase : List[str] = is_training
lowercase : Optional[Any] = use_input_mask
lowercase : Optional[int] = use_token_type_ids
lowercase : List[Any] = use_labels
lowercase : Optional[Any] = vocab_size
lowercase : int = hidden_size
lowercase : Union[str, Any] = num_hidden_layers
lowercase : Dict = num_attention_heads
lowercase : str = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : str = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_vocab_size
lowercase : Optional[int] = type_sequence_label_size
lowercase : str = initializer_range
lowercase : Any = num_labels
lowercase : List[Any] = num_choices
lowercase : Optional[int] = scope
lowercase : str = range_bbox
def lowercase ( self : str ) -> Optional[int]:
lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase : str = bbox[i, j, 3]
lowercase : Tuple = bbox[i, j, 1]
lowercase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase : Optional[int] = bbox[i, j, 2]
lowercase : List[str] = bbox[i, j, 0]
lowercase : Union[str, Any] = t
lowercase : Any = tf.convert_to_tensor(lowerCAmelCase )
lowercase : Optional[Any] = None
if self.use_input_mask:
lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_token_type_ids:
lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase : Dict = None
lowercase : List[str] = None
lowercase : List[Any] = None
if self.use_labels:
lowercase : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase : Optional[int] = ids_tensor([self.batch_size], self.num_choices )
lowercase : Tuple = LayoutLMConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : int, lowerCAmelCase : Tuple, lowerCAmelCase : Optional[Any], lowerCAmelCase : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[Any], lowerCAmelCase : Tuple, lowerCAmelCase : List[Any] ) -> Dict:
lowercase : Dict = TFLayoutLMModel(config=lowerCAmelCase )
lowercase : str = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
lowercase : Union[str, Any] = model(lowerCAmelCase, lowerCAmelCase, token_type_ids=lowerCAmelCase )
lowercase : Any = model(lowerCAmelCase, lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def lowercase ( self : Tuple, lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int], lowerCAmelCase : Optional[int], lowerCAmelCase : Tuple, lowerCAmelCase : List[str] ) -> Any:
lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=lowerCAmelCase )
lowercase : List[Any] = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Any, lowerCAmelCase : Tuple, lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Dict, lowerCAmelCase : List[str], lowerCAmelCase : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : int ) -> List[str]:
lowercase : Optional[Any] = self.num_labels
lowercase : Optional[int] = TFLayoutLMForSequenceClassification(config=lowerCAmelCase )
lowercase : Tuple = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase ( self : Tuple, lowerCAmelCase : Union[str, Any], lowerCAmelCase : List[str], lowerCAmelCase : Optional[int], lowerCAmelCase : List[str], lowerCAmelCase : List[str], lowerCAmelCase : int, lowerCAmelCase : Optional[Any], lowerCAmelCase : Dict ) -> Dict:
lowercase : Optional[int] = self.num_labels
lowercase : int = TFLayoutLMForTokenClassification(config=lowerCAmelCase )
lowercase : List[str] = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : Any, lowerCAmelCase : Optional[Any], lowerCAmelCase : List[str], lowerCAmelCase : List[Any], lowerCAmelCase : List[Any], lowerCAmelCase : Any, lowerCAmelCase : str, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Tuple ) -> Optional[Any]:
lowercase : List[str] = TFLayoutLMForQuestionAnswering(config=lowerCAmelCase )
lowercase : Optional[int] = model(lowerCAmelCase, lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase ( self : Tuple ) -> Union[str, Any]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = True
_lowerCamelCase = 10
def lowercase ( self : Tuple ) -> int:
lowercase : int = TFLayoutLMModelTester(self )
lowercase : int = ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 )
def lowercase ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
def lowercase ( self : str ) -> List[Any]:
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase ( self : List[Any] ) -> Tuple:
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def lowercase ( self : int ) -> List[str]:
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def lowercase ( self : Dict ) -> int:
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
def lowercase ( self : List[str] ) -> Any:
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
@slow
def lowercase ( self : Dict ) -> List[Any]:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict = TFLayoutLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def lowercase ( self : List[Any] ) -> List[Any]:
pass
def lowercase__ ( ) -> str:
'''simple docstring'''
lowercase : Any = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
lowercase : List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase : List[str] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
lowercase : Dict = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase : List[Any] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a__ ( unittest.TestCase ):
@slow
def lowercase ( self : Optional[int] ) -> str:
lowercase : Any = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase , lowercase , lowercase , lowercase , lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : List[str] = model(input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
# test the sequence output on [0, :3, :3]
lowercase : Dict = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], lowerCAmelCase, atol=1e-3 ) )
# test the pooled output on [1, :3]
lowercase : Any = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3], lowerCAmelCase, atol=1e-3 ) )
@slow
def lowercase ( self : List[Any] ) -> Any:
# initialize model with randomly initialized sequence classification head
lowercase : List[str] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=2 )
lowercase , lowercase , lowercase , lowercase , lowercase : Any = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : Optional[int] = model(
input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=tf.convert_to_tensor([1, 1] ), )
# test whether we get a loss as a scalar
lowercase : List[str] = outputs.loss
lowercase : List[Any] = (2,)
self.assertEqual(loss.shape, lowerCAmelCase )
# test the shape of the logits
lowercase : str = outputs.logits
lowercase : List[str] = (2, 2)
self.assertEqual(logits.shape, lowerCAmelCase )
@slow
def lowercase ( self : List[Any] ) -> str:
# initialize model with randomly initialized token classification head
lowercase : Tuple = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=13 )
lowercase , lowercase , lowercase , lowercase , lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : List[str] = model(
input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
# test the shape of the logits
lowercase : Union[str, Any] = outputs.logits
lowercase : Union[str, Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape, lowerCAmelCase )
@slow
def lowercase ( self : Union[str, Any] ) -> int:
# initialize model with randomly initialized token classification head
lowercase : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
lowercase , lowercase , lowercase , lowercase , lowercase : Any = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : int = model(input_ids=lowerCAmelCase, bbox=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase )
# test the shape of the logits
lowercase : str = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape, lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape, lowerCAmelCase )
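# Added preprocessing note: LayoutLM expects one bounding box per token, with
# coordinates scaled to a 0-1000 grid, as the hand-built bbox tensor above
# illustrates. A common normalization helper (hedged sketch, not part of the
# tests above):
def normalize_box(box, page_width, page_height):
    x_min, y_min, x_max, y_max = box
    return [
        int(1000 * x_min / page_width),
        int(1000 * y_min / page_height),
        int(1000 * x_max / page_width),
        int(1000 * y_max / page_height),
    ]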
| 255
| 1
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = relative_attention
__lowerCAmelCase = position_biased_input
__lowerCAmelCase = pos_att_type
__lowerCAmelCase = scope
def snake_case ( self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def snake_case ( self , __a ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = DebertaVaModel(config=__A )
model.to(__A )
model.eval()
__lowerCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A )[0]
__lowerCAmelCase = model(__A , token_type_ids=__A )[0]
__lowerCAmelCase = model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = DebertaVaForMaskedLM(config=__A )
model.to(__A )
model.eval()
__lowerCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DebertaVaForSequenceClassification(__A )
model.to(__A )
model.eval()
__lowerCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DebertaVaForTokenClassification(config=__A )
model.to(__A )
model.eval()
__lowerCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = DebertaVaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__lowerCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = DebertaVaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[int] =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Any =True
__UpperCAmelCase : Optional[int] =False
__UpperCAmelCase : str =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : int =False
def snake_case ( self ):
__lowerCAmelCase = DebertaVaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__A , hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__A )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__A )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__A )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__A )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__A )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__A )
@slow
def snake_case ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DebertaVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def snake_case ( self ):
pass
@slow
def snake_case ( self ):
__lowerCAmelCase = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
__lowerCAmelCase = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase = model(__A , attention_mask=__A )[0]
# compare the actual values for a slice.
__lowerCAmelCase = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) , f"{output[:, 1:4, 1:4]}" )
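# Added note: the test above follows the standard integration-test pattern --
# load pretrained weights, run a forward pass under torch.no_grad(), then
# compare a small output slice against hard-coded reference values with
# torch.allclose and a loose tolerance (atol=1e-4) to absorb numeric drift
# across hardware and library versions.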
| 361
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Dict = logging.get_logger(__name__)
A : Optional[int] = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any ="""xlnet"""
__UpperCAmelCase : Tuple =["""mems"""]
__UpperCAmelCase : List[str] ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __a=3_20_00 , __a=10_24 , __a=24 , __a=16 , __a=40_96 , __a="gelu" , __a=True , __a="bi" , __a=0.0_2 , __a=1e-1_2 , __a=0.1 , __a=5_12 , __a=None , __a=True , __a=False , __a=False , __a=-1 , __a=False , __a="last" , __a=True , __a="tanh" , __a=0.1 , __a=5 , __a=5 , __a=5 , __a=1 , __a=2 , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
__lowerCAmelCase = d_model // n_head
__lowerCAmelCase = ff_activation
__lowerCAmelCase = d_inner
__lowerCAmelCase = untie_r
__lowerCAmelCase = attn_type
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = dropout
__lowerCAmelCase = mem_len
__lowerCAmelCase = reuse_len
__lowerCAmelCase = bi_data
__lowerCAmelCase = clamp_len
__lowerCAmelCase = same_length
__lowerCAmelCase = summary_type
__lowerCAmelCase = summary_use_proj
__lowerCAmelCase = summary_activation
__lowerCAmelCase = summary_last_dropout
__lowerCAmelCase = start_n_top
__lowerCAmelCase = end_n_top
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , __a , )
__lowerCAmelCase = kwargs["use_cache"]
__lowerCAmelCase = use_mems_eval
__lowerCAmelCase = use_mems_train
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
@property
def snake_case ( self ):
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def snake_case ( self , __a ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 259
| 0
|
"""simple docstring"""
from typing import List
import numpy as np
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Dict = {key: len(__lowerCamelCase ) for key, value in gen_kwargs.items() if isinstance(__lowerCamelCase, __lowerCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
UpperCAmelCase_ : List[str] = max(lists_lengths.values(), default=0 )
return max(1, __lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = []
for group_idx in range(__lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
UpperCAmelCase_ : Optional[Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
UpperCAmelCase_ : List[Any] = range(__lowerCamelCase, start + num_shards_to_add )
shards_indices_per_group.append(__lowerCamelCase )
return shards_indices_per_group
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = _number_of_shards_in_gen_kwargs(__lowerCamelCase )
if num_shards == 1:
return [dict(__lowerCamelCase )]
else:
UpperCAmelCase_ : Any = _distribute_shards(num_shards=__lowerCamelCase, max_num_jobs=__lowerCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__lowerCamelCase, __lowerCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__lowerCamelCase ) )
]
def __a ( __lowerCamelCase ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], __lowerCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = {len(__lowerCamelCase ) for value in gen_kwargs.values() if isinstance(__lowerCamelCase, __lowerCamelCase )}
UpperCAmelCase_ : List[str] = {}
for size in list_sizes:
UpperCAmelCase_ : Tuple = list(range(__lowerCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
UpperCAmelCase_ : Optional[int] = dict(__lowerCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : str = [value[i] for i in indices_per_size[len(__lowerCamelCase )]]
return shuffled_kwargs
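# Hedged stand-alone demo (added) of the shard-distribution rule above: the
# first (num_shards % max_num_jobs) groups each receive one extra shard, so
# group sizes differ by at most one.
def _demo_distribute(num_shards: int, max_num_jobs: int) -> list:
    groups, start = [], 0
    for group_idx in range(max_num_jobs):
        size = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        groups.append(range(start, start + size))
        start += size
    return groups

# _demo_distribute(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]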
| 61
|
'''simple docstring'''
def a__ ( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
if n == 0:
return 0
__SCREAMING_SNAKE_CASE = float("""-inf""" )
for i in range(1 , n + 1 ):
__SCREAMING_SNAKE_CASE = max(
a__ , prices[i - 1] + naive_cut_rod_recursive(n - i , a__ ) )
return max_revue
def a__ ( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
__SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__SCREAMING_SNAKE_CASE = float("""-inf""" )
for i in range(1 , n + 1 ):
__SCREAMING_SNAKE_CASE = max(
a__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , a__ , a__ ) , )
__SCREAMING_SNAKE_CASE = max_revenue
return max_rev[n]
def a__ ( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , n + 1 ):
__SCREAMING_SNAKE_CASE = max_rev[i]
for j in range(1 , i + 1 ):
__SCREAMING_SNAKE_CASE = max(a__ , prices[j - 1] + max_rev[i - j] )
__SCREAMING_SNAKE_CASE = max_revenue_i
return max_rev[n]
def a__ ( a__ , a__ ):
"""simple docstring"""
if n < 0:
__SCREAMING_SNAKE_CASE = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(a__ )
if n > len(a__ ):
__SCREAMING_SNAKE_CASE = (
"""Each integral piece of rod must have a corresponding price. """
F'Got n = {n} but length of prices = {len(a__ )}'
)
raise ValueError(a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [6, 10, 12, 15, 20, 23]
__SCREAMING_SNAKE_CASE = len(a__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__SCREAMING_SNAKE_CASE = 36
__SCREAMING_SNAKE_CASE = top_down_cut_rod(a__ , a__ )
__SCREAMING_SNAKE_CASE = bottom_up_cut_rod(a__ , a__ )
__SCREAMING_SNAKE_CASE = naive_cut_rod_recursive(a__ , a__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
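# Added note: naive_cut_rod_recursive re-solves overlapping subproblems and is
# exponential, O(2**n); both the memoized top-down and the bottom-up versions
# run in O(n**2) time with O(n) extra space for the max_rev table.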
| 267
| 0
|
'''simple docstring'''
_lowerCamelCase = 'Input must be a string of 8 numbers plus letter'
_lowerCamelCase = 'TRWAGMYFPDXBNJZSQVHLCKE'
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase_ : Dict = F'''Expected string as input, found {type(lowerCamelCase_ ).__name__}'''
raise TypeError(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = spanish_id.replace("-" , "" ).upper()
if len(lowerCamelCase_ ) != 9:
raise ValueError(lowerCamelCase_ )
try:
UpperCAmelCase_ : Union[str, Any] = int(spanish_id_clean[0:8] )
UpperCAmelCase_ : Optional[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCamelCase_ ) from ex
if letter.isdigit():
raise ValueError(lowerCamelCase_ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
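# Added worked example: for DNI number 12345678, 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates while "12345678T" does not.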
| 355
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _snake_case (unittest.TestCase):
def __init__( self ,_snake_case ,_snake_case=7 ,_snake_case=3 ,_snake_case=18 ,_snake_case=30 ,_snake_case=4_00 ,_snake_case=True ,_snake_case=None ,_snake_case=True ,_snake_case=None ,_snake_case=True ,_snake_case=[0.48145466, 0.4578275, 0.40821073] ,_snake_case=[0.26862954, 0.26130258, 0.27577711] ,_snake_case=True ,):
UpperCAmelCase_ : List[str] = size if size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Dict = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : List[Any] = do_resize
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : Union[str, Any] = do_center_crop
UpperCAmelCase_ : Any = crop_size
UpperCAmelCase_ : str = do_normalize
UpperCAmelCase_ : Tuple = image_mean
UpperCAmelCase_ : List[Any] = image_std
UpperCAmelCase_ : Dict = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
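# Illustrative helper (hypothetical, not part of the original test suite):
# the pixel_values shape the processor is expected to emit, mirroring the
# shape assertions in the test classes below.
def _expected_pixel_shape(batch_size, num_channels, crop_size):
    return (batch_size, num_channels, crop_size["height"], crop_size["width"])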
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 67
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_SCREAMING_SNAKE_CASE : int = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def UpperCAmelCase__ (UpperCamelCase_=None ):
"""simple docstring"""
if subparsers is not None:
snake_case = subparsers.add_parser('''tpu-config''' ,description=_description )
else:
snake_case = argparse.ArgumentParser('''Accelerate tpu-config command''' ,description=_description )
# Core arguments
snake_case = parser.add_argument_group(
'''Config Arguments''' ,'''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' ,type=UpperCamelCase_ ,default=UpperCamelCase_ ,help='''Path to the config file to use for accelerate.''' ,)
config_args.add_argument(
'''--tpu_name''' ,default=UpperCamelCase_ ,help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' ,)
config_args.add_argument(
'''--tpu_zone''' ,default=UpperCamelCase_ ,help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' ,)
snake_case = parser.add_argument_group('''TPU Arguments''' ,'''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' ,action='''store_true''' ,help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' ,)
pod_args.add_argument(
'''--command_file''' ,default=UpperCamelCase_ ,help='''The path to the file containing the commands to run on the pod on startup.''' ,)
pod_args.add_argument(
'''--command''' ,action='''append''' ,nargs='''+''' ,help='''A command to run on the pod. Can be passed multiple times.''' ,)
pod_args.add_argument(
'''--install_accelerate''' ,action='''store_true''' ,help='''Whether to install accelerate on the pod. Defaults to False.''' ,)
pod_args.add_argument(
'''--accelerate_version''' ,default='''latest''' ,help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' ,)
pod_args.add_argument(
'''--debug''' ,action='''store_true''' ,help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase_ )
return parser
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCamelCase_ ):
snake_case = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
snake_case = defaults.command_file
if not args.command and defaults.commands is not None:
snake_case = defaults.commands
if not args.tpu_name:
snake_case = defaults.tpu_name
if not args.tpu_zone:
snake_case = defaults.tpu_zone
if args.accelerate_version == "dev":
snake_case = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
snake_case = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ) ,UpperCamelCase_ ):
snake_case = F'''accelerate=={args.accelerate_version}'''
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file ,'''r''' ) as f:
snake_case = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] ,UpperCamelCase_ ):
snake_case = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
snake_case = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F'''pip install {args.accelerate_version}''']
new_cmd += args.command
snake_case = '''; '''.join(UpperCamelCase_ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
snake_case = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'''Running {" ".join(UpperCamelCase_ )}''' )
return
subprocess.run(UpperCamelCase_ )
print('''Successfully setup pod.''' )
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = tpu_command_parser()
snake_case = parser.parse_args()
tpu_command_launcher(UpperCamelCase_ )
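# Illustration (values are made up): invoking
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "pip list"
# makes tpu_command_launcher assemble and run roughly:
#     gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#         --command "cd /usr/share; pip list" --worker all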
| 127
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
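# Worked example of the BPE fixtures in setUp above: "adapt" and "act" are
# whole-word vocab entries, while "apte" has no entry of its own, so the
# merges "a p" and "t e</w>" split it into ["ap@@", "te"]; hence
#     tokenizer.tokenize("adapt act apte") == ["adapt", "act", "ap@@", "te"]
# and, wrapped in __start__/__end__, the token ids are [0, 1, 2, 3, 4, 5].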
| 127
| 1
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
    def _set_value(self, key, data):
        """simple docstring"""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]
    def balanced_factor(self):
        """simple docstring"""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution(self, key, data=None):
        """simple docstring"""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
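# Minimal usage sketch (assumes the constructor and insert_data() of the
# accompanying HashTable base class; signatures may differ):
#     ht = HashTableWithLinkedList(size_table=3, charge_factor=2)
#     ht.insert_data(17)
#     ht.insert_data(20)
# Each occupied slot holds a deque, and colliding values stack
# most-recent-first because _set_value() uses appendleft().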
| 191
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
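# Note on the integration check above: facebook/vit-msn-small carries a
# 1000-class ImageNet head, hence the (1, 1000) logits shape; the three
# reference values are regression targets recorded from a fixed-seed
# forward pass.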
| 191
| 1
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    """simple docstring"""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """simple docstring"""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """simple docstring"""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """simple docstring"""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
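# Worked example for _dense_to_one_hot: labels [5, 0, 4] with num_classes=10
# give index_offset [0, 10, 20]; adding the labels yields the flat indices
# [5, 10, 24] that are set to 1, producing a (3, 10) array with one 1 per row.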
class _DataSet:
    """simple docstring"""
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        '''simple docstring'''
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        '''simple docstring'''
        return self._images
    @property
    def labels(self):
        '''simple docstring'''
        return self._labels
    @property
    def num_examples(self):
        '''simple docstring'''
        return self._num_examples
    @property
    def epochs_completed(self):
        '''simple docstring'''
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
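# Example of the epoch boundary handled in next_batch above: with 10
# examples and batch_size=4, the first two calls return indices 0-3 and
# 4-7; the third call finds only 2 examples left, so it concatenates those
# 2 leftovers with 2 examples from a freshly shuffled pass, and
# _index_in_epoch becomes 2 in the new epoch.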
@deprecated(lowerCamelCase_ , """Please write your own downloading logic.""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not gfile.Exists(lowerCamelCase_ ):
gfile.MakeDirs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if not gfile.Exists(lowerCamelCase_ ):
urllib.request.urlretrieve(lowerCamelCase_ , lowerCamelCase_ ) # noqa: S310
with gfile.GFile(lowerCamelCase_ ) as f:
SCREAMING_SNAKE_CASE : Dict = f.size()
print("""Successfully downloaded""" , lowerCamelCase_ , lowerCamelCase_ , """bytes.""" )
return filepath
@deprecated(
lowerCamelCase_ , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def __A ( lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=dtypes.floataa , lowerCamelCase_=True , lowerCamelCase_=50_00 , lowerCamelCase_=None , lowerCamelCase_=DEFAULT_SOURCE_URL , ):
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCamelCase_ , one_hot=lowerCamelCase_ , dtype=lowerCamelCase_ , seed=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fake()
SCREAMING_SNAKE_CASE : str = fake()
SCREAMING_SNAKE_CASE : List[str] = fake()
return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
if not source_url: # empty string check
SCREAMING_SNAKE_CASE : int = DEFAULT_SOURCE_URL
SCREAMING_SNAKE_CASE : Optional[int] = """train-images-idx3-ubyte.gz"""
SCREAMING_SNAKE_CASE : List[Any] = """train-labels-idx1-ubyte.gz"""
SCREAMING_SNAKE_CASE : Optional[Any] = """t10k-images-idx3-ubyte.gz"""
SCREAMING_SNAKE_CASE : Union[str, Any] = """t10k-labels-idx1-ubyte.gz"""
SCREAMING_SNAKE_CASE : Tuple = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + train_images_file )
with gfile.Open(lowerCamelCase_ , """rb""" ) as f:
SCREAMING_SNAKE_CASE : Tuple = _extract_images(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + train_labels_file )
with gfile.Open(lowerCamelCase_ , """rb""" ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + test_images_file )
with gfile.Open(lowerCamelCase_ , """rb""" ) as f:
SCREAMING_SNAKE_CASE : List[str] = _extract_images(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + test_labels_file )
with gfile.Open(lowerCamelCase_ , """rb""" ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
if not 0 <= validation_size <= len(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = (
"""Validation size should be between 0 and """
f'''{len(lowerCamelCase_ )}. Received: {validation_size}.'''
)
raise ValueError(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = train_images[:validation_size]
SCREAMING_SNAKE_CASE : str = train_labels[:validation_size]
SCREAMING_SNAKE_CASE : int = train_images[validation_size:]
SCREAMING_SNAKE_CASE : Any = train_labels[validation_size:]
SCREAMING_SNAKE_CASE : Tuple = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
SCREAMING_SNAKE_CASE : int = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
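# Typical usage sketch (downloads the four MNIST archives on first call;
# the directory below is an example only):
#     data = read_data_sets("/tmp/mnist_data", one_hot=True)
#     images, labels = data.train.next_batch(100)
#     print(data.validation.num_examples)  # 5000 with the default split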
| 323
|
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""
    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323
| 1
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class TestMbartCc25Enro(TestCasePlus):
    """simple docstring"""
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=True , )
        self.data_dir = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> List[str]:
MarianMTModel.from_pretrained(_lowerCamelCase )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : List[Any] = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
A_ : Optional[Any] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
A_ : int = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
A_ : List[Any] = bash_script.replace(_lowerCamelCase , str(_lowerCamelCase ) )
A_ : int = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
A_ : Optional[Any] = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
A_ : Optional[Any] = ["""finetune.py"""] + bash_script.split() + args
with patch.object(_lowerCamelCase , """argv""" , _lowerCamelCase ):
A_ : Any = argparse.ArgumentParser()
A_ : Optional[int] = pl.Trainer.add_argparse_args(_lowerCamelCase )
A_ : List[Any] = SummarizationModule.add_model_specific_args(_lowerCamelCase , os.getcwd() )
A_ : Union[str, Any] = parser.parse_args()
A_ : Optional[Any] = main(_lowerCamelCase )
# Check metrics
A_ : int = load_json(model.metrics_save_path )
A_ : Tuple = metrics["""val"""][0]
A_ : Any = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _lowerCamelCase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
A_ : Tuple = os.listdir(_lowerCamelCase )
A_ : Optional[Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
A_ : Any = os.path.join(args.output_dir , _lowerCamelCase )
A_ : Tuple = torch.load(_lowerCamelCase , map_location="""cpu""" )
A_ : int = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
A_ : Tuple = {os.path.basename(_lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    """simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : List[Any] = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
A_ : Union[str, Any] = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
A_ : Union[str, Any] = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
A_ : Union[str, Any] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
A_ : Union[str, Any] = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
A_ : str = bash_script.replace(_lowerCamelCase , str(_lowerCamelCase ) )
A_ : int = self.get_auto_remove_tmp_dir()
A_ : Union[str, Any] = bash_script.replace("""--fp16""" , """""" )
A_ : Optional[Any] = 6
A_ : Union[str, Any] = (
["""distillation.py"""]
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
"""--gpus=1""",
"""--learning_rate=1e-3""",
F"--num_train_epochs={epochs}",
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(_lowerCamelCase , """argv""" , _lowerCamelCase ):
A_ : Any = argparse.ArgumentParser()
A_ : Any = pl.Trainer.add_argparse_args(_lowerCamelCase )
A_ : str = SummarizationDistiller.add_model_specific_args(_lowerCamelCase , os.getcwd() )
A_ : Dict = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
A_ : Dict = distill_main(_lowerCamelCase )
# Check metrics
A_ : List[Any] = load_json(model.metrics_save_path )
A_ : Union[str, Any] = metrics["""val"""][0]
A_ : int = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _lowerCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
A_ : Optional[Any] = os.listdir(_lowerCamelCase )
A_ : List[Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
A_ : List[Any] = os.path.join(args.output_dir , _lowerCamelCase )
A_ : Dict = torch.load(_lowerCamelCase , map_location="""cpu""" )
A_ : str = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
A_ : Tuple = {os.path.basename(_lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 365
|
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    """simple docstring"""
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
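# With the fixtures above, parameterized.expand generates one sub-test per
# (stage, model) pair, named via custom_name_func, e.g. "..._zero2_base",
# "..._zero2_robust", "..._zero3_base" and "..._zero3_robust".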
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    """simple docstring"""
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check(self, stage, model, eval_steps=10, distributed=True, quality_checks=True, fp16=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16)
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = F"\n    --model_name_or_path {model_name}\n    --dataset_name hf-internal-testing/librispeech_asr_dummy\n    --dataset_config_name clean\n    --train_split_name validation\n    --validation_split_name validation\n    --output_dir {output_dir}\n    --num_train_epochs {str(num_train_epochs)}\n    --per_device_train_batch_size 2\n    --per_device_eval_batch_size 2\n    --evaluation_strategy steps\n    --learning_rate 5e-4\n    --warmup_steps 8\n    --orthography timit\n    --preprocessing_num_workers 1\n    --group_by_length\n    --freeze_feature_extractor\n    --report_to none\n    --save_steps 0\n    --eval_steps {eval_steps}\n    --report_to none\n    ".split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 164
| 0
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
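# Sketch of the metric prefixing performed above: a compute_metrics that
# returns {"bleu": 28.1} is rewritten to {"eval_bleu": 28.1} during
# evaluate() (or {"test_bleu": 28.1} during predict()) before being merged
# with the evaluation loop's own metrics.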
| 92
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    """simple docstring"""
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Dict:
        UpperCamelCase :Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
UpperCamelCase :List[Any] = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[int] = self.get_tokenizer()
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase :int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
UpperCamelCase :Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = '''test'''
UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = '''test'''
UpperCamelCase :str = self.prepare_image_inputs()
UpperCamelCase :Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Any = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Union[str, Any] = processor.char_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :Optional[Any] = self.get_tokenizer()
UpperCamelCase :Any = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = None
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.randn(1 , 27 , 38 )
UpperCamelCase :Union[str, Any] = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase :Optional[Any] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase :Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 259
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=1_3 , SCREAMING_SNAKE_CASE__ : Any=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Dict=3_7 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=1_0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any="divided_space_time" , SCREAMING_SNAKE_CASE__ : str=None , ) -> str:
a_ : int = parent
a_ : Tuple = batch_size
a_ : Union[str, Any] = image_size
a_ : Optional[int] = num_channels
a_ : Optional[int] = patch_size
a_ : List[str] = num_frames
a_ : int = is_training
a_ : Union[str, Any] = use_labels
a_ : Tuple = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : int = num_attention_heads
a_ : Any = intermediate_size
a_ : str = hidden_act
a_ : Any = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Dict = attention_type
a_ : List[Any] = initializer_range
a_ : Optional[Any] = scope
a_ : Optional[Any] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
a_ : Any = (image_size // patch_size) ** 2
a_ : Union[str, Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : Tuple = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a_ : str = None
if self.use_labels:
a_ : Any = ids_tensor([self.batch_size] , self.num_labels )
a_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
a_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
a_ : str = self.num_labels
return config
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
a_ : Optional[Any] = TimesformerModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
a_ : Union[str, Any] = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ )
# verify the logits shape
a_ : Tuple = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : Optional[int] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case__ : List[Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case__ : Optional[Any] = False
snake_case__ : Union[str, Any] = False
snake_case__ : Union[str, Any] = False
snake_case__ : Dict = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
a_ : Tuple = TimesformerModelTester(self )
a_ : List[str] = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Tuple:
a_ : List[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ):
a_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : str = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Any = [*signature.parameters.keys()]
a_ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Any = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
if not self.has_attentions:
pass
else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Optional[int] = True
for model_class in self.all_model_classes:
a_ : Optional[Any] = self.model_tester.seq_length
a_ : List[str] = self.model_tester.num_frames
a_ : Optional[int] = True
a_ : Optional[Any] = False
a_ : Tuple = True
a_ : int = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
a_ : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
a_ : Tuple = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a_ : Optional[Any] = True
a_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
a_ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
a_ : Optional[int] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a_ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ )
# Check attention is always last and order is fine
a_ : List[Any] = True
a_ : Dict = True
a_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
a_ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE__ ) )
a_ : List[str] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
a_ : Any = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
a_ : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
a_ : Optional[int] = outputs.hidden_states
a_ : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
a_ : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : str = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
"""simple docstring"""
a_ : Optional[int] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a_ : Any = np.load(__A )
return list(__A )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
a_ : List[str] = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self.default_image_processor
a_ : int = prepare_video()
a_ : List[str] = image_processor(video[:8] , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
a_ : int = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
a_ : Any = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
a_ : List[str] = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
| 120
|
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place using odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
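# A quick self-check for the odd-even transposition sort above: it should
# agree with Python's built-in sorted() on random input.
import random

sample = random.sample(range(100), 10)
assert odd_even_transposition(sample.copy()) == sorted(sample)
print("odd-even transposition agrees with sorted()")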
| 120
| 1
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase__ = """src/diffusers"""
# Matches is_xxx_available()
UpperCamelCase__ = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
UpperCamelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
UpperCamelCase__ = """
{0} = None
"""
UpperCamelCase__ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
UpperCamelCase__ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def _a ( SCREAMING_SNAKE_CASE_ : Tuple ):
__lowerCAmelCase = _re_backend.findall(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def _a ( ):
with open(os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
__lowerCAmelCase = 0
__lowerCAmelCase = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__lowerCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
__lowerCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE_ ) and len(lines[line_index] ) > 1:
__lowerCAmelCase = lines[line_index]
__lowerCAmelCase = _re_single_line_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE_ ) > 0:
__lowerCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] ):
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE_ )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
if backend_specific_objects is None:
__lowerCAmelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__lowerCAmelCase = {}
for backend, objects in backend_specific_objects.items():
__lowerCAmelCase = "[" + ", ".join(F"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]"
__lowerCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for o in objects] )
__lowerCAmelCase = dummy_file
return dummy_files
def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
__lowerCAmelCase = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__lowerCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "utils" )
__lowerCAmelCase = {
backend: os.path.join(SCREAMING_SNAKE_CASE_ , F"""dummy_{short_names.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}_objects.py""" )
for backend in dummy_files.keys()
}
__lowerCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.read()
else:
__lowerCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"""Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}_objects.py as the main """
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"""diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}_objects.py. Run `make fix-copies` """
"to fix this." )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCamelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
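# A sketch of what a generated dummy object does at runtime. `DummyObject` and
# `requires_backends` below are simplified stand-ins for the diffusers helpers
# of the same name, and `FakePipeline` is a hypothetical class name.
class DummyObject(type):
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {backends}")

class FakePipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

try:
    FakePipeline()
except ImportError as err:
    print(err)  # FakePipeline requires the following backends: ['torch']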
| 92
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
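# A quick numerical sanity check for the 3x3 branch above, reusing numpy
# (already imported for `array`): A @ inverse(A) should be close to the
# identity matrix. The test matrix is illustrative.
import numpy as np

A = [[2.0, 5.0, 7.0], [6.0, 3.0, 4.0], [5.0, -2.0, -3.0]]
A_inv = inverse_of_matrix(A)
assert np.allclose(np.array(A) @ np.array(A_inv), np.eye(3), atol=1e-6)
print("3x3 inverse verified against the identity")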
| 67
| 0
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
"""simple docstring"""
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
        UpperCamelCase__ :str = Text2TextGenerationPipeline(model=__a , tokenizer=__a )
return generator, ["Something to write", "Something else"]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :int = generator('''Something there''' )
self.assertEqual(__a , [{'''generated_text''': ANY(__a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
UpperCamelCase__ :str = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{'''generated_text''': ANY(__a )}, {'''generated_text''': ANY(__a )}],
[{'''generated_text''': ANY(__a )}, {'''generated_text''': ANY(__a )}],
] , )
UpperCamelCase__ :int = generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{'''generated_text''': ANY(__a )}, {'''generated_text''': ANY(__a )}],
[{'''generated_text''': ANY(__a )}, {'''generated_text''': ANY(__a )}],
] , )
with self.assertRaises(__a ):
generator(4 )
@require_torch
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
# do_sample=False necessary for reproducibility
UpperCamelCase__ :Tuple = generator('''Something there''' , do_sample=__a )
self.assertEqual(__a , [{'''generated_text''': ''''''}] )
UpperCamelCase__ :List[str] = 3
UpperCamelCase__ :List[str] = generator(
'''Something there''' , num_return_sequences=__a , num_beams=__a , )
UpperCamelCase__ :Any = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(__a , __a )
UpperCamelCase__ :List[Any] = generator('''This is a test''' , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
] , )
UpperCamelCase__ :Dict = generator.model.config.eos_token_id
UpperCamelCase__ :Union[str, Any] = '''<pad>'''
UpperCamelCase__ :List[str] = generator(
['''This is a test''', '''This is a second test'''] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
# do_sample=False necessary for reproducibility
UpperCamelCase__ :Dict = generator('''Something there''' , do_sample=__a )
self.assertEqual(__a , [{'''generated_text''': ''''''}] )
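# A minimal standalone sketch of the pipeline exercised by these tests, using
# the same tiny checkpoint; output text from a random-weight model is
# meaningless, the point is the call shape.
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))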
| 362
|
'''simple docstring'''
import socket
def main() -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
main()
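# The client above assumes a file-sending server on the same host. A minimal
# counterpart sketch (port 12312 matches the client; the filename is an
# assumption):
def serve_file(filename: str = "file_to_send.bin", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()  # closing signals EOF, so the client's recv() returns b""
    server.close()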
| 219
| 0
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ = logging.getLogger()
def __lowerCamelCase ( ) -> Tuple:
__SCREAMING_SNAKE_CASE :int = argparse.ArgumentParser()
parser.add_argument('''-f''' )
__SCREAMING_SNAKE_CASE :Any = parser.parse_args()
return args.f
class _SCREAMING_SNAKE_CASE( A ):
def _UpperCamelCase ( self ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = logging.StreamHandler(sys.stdout )
logger.addHandler(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'''run_glue_deebert.py''' )
with patch.object(SCREAMING_SNAKE_CASE__ ,'''argv''' ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(SCREAMING_SNAKE_CASE__ ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(SCREAMING_SNAKE_CASE__ )
| 191
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
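# A usage sketch for the explicit Euler integrator above: solve y' = y with
# y(0) = 1 on [0, 1]; the endpoint approximates e ≈ 2.71828 (Euler
# systematically undershoots for this ODE).
ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(ys[-1])  # ~2.7048 with step size 0.01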
| 191
| 1
|
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
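# Context sketch for the helpers above: least_divisible_repunit(k) is the
# smallest n such that the repunit R(n) = 111...1 (n ones) is divisible by k,
# or 0 when k shares a factor with 10.
assert least_divisible_repunit(7) == 6    # 111111 = 7 * 15873
assert least_divisible_repunit(41) == 5   # 11111 = 41 * 271
assert least_divisible_repunit(10) == 0   # shares a factor with 10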
| 371
|
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
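# Usage sketch (the name `sum_of_divisors` is ours; nothing in the snippet
# constrains it). The function sums proper divisors, so perfect numbers map
# to themselves:
assert sum_of_divisors(6) == 6     # 1 + 2 + 3
assert sum_of_divisors(28) == 28   # 1 + 2 + 4 + 7 + 14
assert sum_of_divisors(12) == 16   # 1 + 2 + 3 + 4 + 6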
| 120
| 0
|
_lowerCamelCase : str = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
_lowerCamelCase : Tuple = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
_lowerCamelCase : Union[str, Any] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
_lowerCamelCase : Any = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
_lowerCamelCase : List[str] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
_lowerCamelCase : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
_lowerCamelCase : List[str] = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
_lowerCamelCase : Union[str, Any] = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 336
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
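# A usage sketch for the Prim implementations above: build a small weighted
# graph (`connect` takes 1-indexed vertex numbers) and run both variants;
# each MST edge is reported as (vertex, parent-in-tree).
graph = [Vertex(i) for i in range(5)]
connect(graph, 1, 2, 4)
connect(graph, 1, 3, 1)
connect(graph, 3, 2, 2)
connect(graph, 3, 4, 5)
connect(graph, 2, 5, 3)
print(prim(graph, graph[0]))              # [(2, 3), (3, 1), (4, 3), (5, 2)]
print(list(prim_heap(graph, graph[0])))   # heap-based variant, same tree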
| 164
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = KandinskyVaaPriorPipeline
A__ = ['''prompt''']
A__ = ['''prompt''', '''negative_prompt''']
A__ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Dict ) -> List[str]:
'''simple docstring'''
return 32
@property
def A_ ( self : Any ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self : str ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__snake_case : List[Any] = PriorTransformer(**__a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a )
return model
@property
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def A_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__snake_case : Tuple = self.dummy_prior
__snake_case : List[str] = self.dummy_image_encoder
__snake_case : str = self.dummy_text_encoder
__snake_case : List[str] = self.dummy_tokenizer
__snake_case : List[str] = self.dummy_image_processor
__snake_case : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , )
__snake_case : str = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any:
'''simple docstring'''
if str(__a ).startswith('mps' ):
__snake_case : List[str] = torch.manual_seed(__a )
else:
__snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a )
__snake_case : List[Any] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def A_ ( self : str ) -> Dict:
'''simple docstring'''
__snake_case : str = 'cpu'
__snake_case : List[str] = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**__a )
__snake_case : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) )
__snake_case : List[str] = output.image_embeds
__snake_case : str = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__snake_case : Union[str, Any] = image[0, -10:]
__snake_case : Any = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__snake_case : List[Any] = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = torch_device == 'cpu'
__snake_case : Dict = True
__snake_case : Union[str, Any] = False
self._test_inference_batch_single_identical(
test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , )
@skip_mps
def A_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = torch_device == 'cpu'
__snake_case : Optional[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__a , test_mean_pixel_difference=__a , )
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
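# A usage sketch for the rope_scaling validation above (the values are
# illustrative): a well-formed dict passes, an unknown scaling type raises.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

try:
    LlamaConfig(rope_scaling={"type": "exponential", "factor": 2.0})
except ValueError as err:
    print(err)  # name field must be one of ['linear', 'dynamic']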
| 0
| 1
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any=13 , lowerCamelCase : Any=7 , lowerCamelCase : List[Any]=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Tuple=True , lowerCamelCase : str=True , lowerCamelCase : Any=True , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[Any]=False , lowerCamelCase : List[Any]=False , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[str]=99 , lowerCamelCase : Optional[int]=0 , lowerCamelCase : Optional[int]=32 , lowerCamelCase : List[str]=5 , lowerCamelCase : Union[str, Any]=4 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Dict=5_12 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : List[str]=2 , lowerCamelCase : List[str]=4 , lowerCamelCase : Union[str, Any]="last" , lowerCamelCase : Tuple=True , lowerCamelCase : List[str]=None , lowerCamelCase : List[str]=0 , ) -> Any:
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : List[Any] = seq_length
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : Any = use_input_lengths
lowerCAmelCase_ : Dict = use_token_type_ids
lowerCAmelCase_ : Dict = use_labels
lowerCAmelCase_ : Optional[int] = gelu_activation
lowerCAmelCase_ : List[str] = sinusoidal_embeddings
lowerCAmelCase_ : Tuple = causal
lowerCAmelCase_ : Optional[Any] = asm
lowerCAmelCase_ : List[Any] = n_langs
lowerCAmelCase_ : List[str] = vocab_size
lowerCAmelCase_ : Union[str, Any] = n_special
lowerCAmelCase_ : Optional[int] = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : List[str] = type_sequence_label_size
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : List[str] = num_labels
lowerCAmelCase_ : List[Any] = num_choices
lowerCAmelCase_ : Optional[Any] = summary_type
lowerCAmelCase_ : str = use_proj
lowerCAmelCase_ : List[Any] = scope
lowerCAmelCase_ : int = bos_token_id
def __lowercase ( self : Dict ) -> Optional[int]:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Union[str, Any] = None
if self.use_input_lengths:
lowerCAmelCase_ : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase_ : Optional[Any] = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Tuple = None
if self.use_labels:
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 120
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn",
    ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 120
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__magic_name__ , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("""Does not support attention outputs""" )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
@slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
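

# Shape notes for the assertions in this file (an interpretation, not stated in
# the source): `positions` is (8, batch, seq_len, 14, 3) -- 8 structure-module
# iterations, 14 atoms per residue in the atom14 encoding, and 3 Cartesian
# coordinates -- while `angles` is (8, batch, seq_len, 7, 2), i.e. 7 torsion
# angles per residue stored as (sin, cos) pairs.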
| 354
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
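

# Usage sketch (illustrative, not part of the original module): build a small
# weighted graph and pull out its minimum spanning tree with Kruskal's algorithm.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)  # heaviest edge; Kruskal's algorithm drops it
    mst = g.kruskal()
    print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}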
| 347
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local(self):
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 95
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    This class stores the first and second signals and performs their circular
    convolution.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Performs the circular convolution of the first and second signals using a
        matrix of rotated copies of the second signal.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
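

# Cross-check sketch (illustrative, not used by the class above): circular
# convolution can equivalently be computed with the FFT, because the DFT turns
# circular convolution into pointwise multiplication. For the hard-coded
# signals above, [2, 1, 2, -1] convolved with [1, 2, 3, 4] gives [10, 10, 6, 14]
# by both routes.
def _circular_convolution_fft(first_signal, second_signal):
    n = max(len(first_signal), len(second_signal))
    spectrum = np.fft.fft(first_signal, n) * np.fft.fft(second_signal, n)
    return [round(value, 2) for value in np.real(np.fft.ifft(spectrum))]


assert _circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4]) == [10.0, 10.0, 6.0, 14.0]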
| 219
| 0
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """
    Construct a BigBird tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , __lowerCamelCase , __lowerCamelCase="<unk>" , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="[MASK]" , __lowerCamelCase="[CLS]" , __lowerCamelCase = None , **__lowerCamelCase , ):
'''simple docstring'''
__A : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__A : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__A : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__A : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
__A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__A : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
__A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sep_token=__lowerCamelCase , mask_token=__lowerCamelCase , cls_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
__A : Any = vocab_file
__A : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__A : Tuple = self.__dict__.copy()
__A : int = None
return state
def __setstate__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__A : Union[str, Any] = {}
__A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
return self.sp_model.piece_to_id(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = []
__A : int = ''''''
__A : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
__A : Optional[Any] = True
__A : Optional[int] = []
else:
current_sub_tokens.append(__lowerCamelCase )
__A : List[Any] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = True , **__lowerCamelCase , ):
'''simple docstring'''
__A : Tuple = kwargs.pop('''use_source_tokenizer''' , __lowerCamelCase )
__A : Optional[Any] = self.convert_ids_to_tokens(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__A : Dict = []
__A : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) )
__A : Any = []
sub_texts.append(__lowerCamelCase )
else:
current_sub_text.append(__lowerCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__A : int = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(__lowerCamelCase ) )
else:
__A : str = ''''''.join(__lowerCamelCase )
__A : Dict = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__A : Dict = self.clean_up_tokenization(__lowerCamelCase )
return clean_text
else:
return text
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
__A : Any = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
__A : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
__A : Dict = [self.sep_token_id]
__A : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
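

# Layout sketch for the two helpers above (illustrative only): a single
# sequence becomes [CLS] A [SEP], a pair becomes [CLS] A [SEP] B [SEP], and the
# token type ids mark the first segment (its [CLS] and [SEP] included) with 0
# and the second segment (trailing [SEP] included) with 1.
def _pair_layout_sketch(ids_a, ids_b, cls_id=65, sep_id=66):
    # 65/66 are hypothetical placeholder ids, not BigBird's actual special-token ids
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids


assert _pair_layout_sketch([7, 8], [9]) == ([65, 7, 8, 66, 9, 66], [0, 0, 0, 0, 1, 1])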
| 291
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return the first `precision` digits of pi, computed with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
n = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
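

# Math note (an interpretation of the constants above, not stated in the
# source): the loop evaluates the Chudnovsky series
#
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                          / ((3k)! * (k!)^3 * 640320^(3k + 3/2))
#
# where 426880 * sqrt(10005) = 640320^(3/2) / 12, so pi = constant_term /
# partial_sum. Each additional term contributes roughly 14 correct digits,
# which is why the number of iterations is ceil(precision / 14).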
| 291
| 1
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_A = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
_A = f"""https://www.google.com/search?q={query}&num=100"""
_A = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
_A = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
_A = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
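

# Note (an interpretation, not stated in the source): the first selector
# targets the regular desktop results markup, where div.yuRUbf wraps the result
# anchor; the AttributeError fallback handles the lightweight layout, where
# div.kCrYT wraps a redirect link whose query string carries the real target,
# hence parse_qs(...)['url'][0].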
| 62
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCAmelCase_ : int = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCamelCase , cache_dir=lowerCamelCase )
lowerCAmelCase_ : Dict = [t[-1] for t in os.walk(os.path.join(lowerCamelCase , os.listdir(lowerCamelCase )[0] , """snapshots""" ) )]
lowerCAmelCase_ : List[Any] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Any ) -> Optional[int]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCamelCase )
lowerCAmelCase_ : List[str] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCAmelCase_ : Tuple = jax.random.PRNGKey(0 )
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : Dict = jax.device_count()
lowerCAmelCase_ : Tuple = num_samples * [prompt]
lowerCAmelCase_ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowerCAmelCase_ : Optional[Any] = replicate(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = jax.random.split(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : int = shard(lowerCamelCase )
lowerCAmelCase_ : str = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1
lowerCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase ) == num_samples
def __lowercase ( self : Optional[int] ) -> List[str]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=lowerCamelCase )
lowerCAmelCase_ : Dict = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCAmelCase_ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[Any] = 50
lowerCAmelCase_ : str = jax.device_count()
lowerCAmelCase_ : Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ : Dict = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowerCAmelCase_ : Optional[Any] = replicate(lowerCamelCase )
lowerCAmelCase_ : str = jax.random.split(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = shard(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1
def __lowercase ( self : List[Any] ) -> List[Any]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase )
lowerCAmelCase_ : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCAmelCase_ : List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : Union[str, Any] = 50
lowerCAmelCase_ : Dict = jax.device_count()
lowerCAmelCase_ : List[Any] = num_samples * [prompt]
lowerCAmelCase_ : Tuple = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowerCAmelCase_ : Optional[int] = replicate(lowerCamelCase )
lowerCAmelCase_ : Any = jax.random.split(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Optional[int] = shard(lowerCamelCase )
lowerCAmelCase_ : Any = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def __lowercase ( self : int ) -> Optional[int]:
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
lowerCAmelCase_ : List[str] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCAmelCase_ : str = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[Any] = 50
lowerCAmelCase_ : Union[str, Any] = jax.device_count()
lowerCAmelCase_ : Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowerCAmelCase_ : List[Any] = replicate(lowerCamelCase )
lowerCAmelCase_ : List[str] = jax.random.split(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : List[str] = shard(lowerCamelCase )
lowerCAmelCase_ : Tuple = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def __lowercase ( self : List[str] ) -> Any:
lowerCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , )
lowerCAmelCase_ : Any = scheduler.create_state()
lowerCAmelCase_ : Optional[Any] = scheduler_state
lowerCAmelCase_ : List[Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCAmelCase_ : int = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[str] = 50
lowerCAmelCase_ : str = jax.device_count()
lowerCAmelCase_ : List[Any] = num_samples * [prompt]
lowerCAmelCase_ : int = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowerCAmelCase_ : List[str] = replicate(lowerCamelCase )
lowerCAmelCase_ : Any = jax.random.split(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = shard(lowerCamelCase )
lowerCAmelCase_ : Tuple = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1
def __lowercase ( self : Any ) -> Union[str, Any]:
lowerCAmelCase_ : int = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
lowerCAmelCase_ : Any = jax.device_count()
lowerCAmelCase_ : Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowerCamelCase )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase , )
lowerCAmelCase_ : Optional[int] = replicate(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = shard(lowerCamelCase )
lowerCAmelCase_ : Tuple = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
lowerCAmelCase_ : Dict = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCamelCase , use_memory_efficient_attention=lowerCamelCase , )
lowerCAmelCase_ : List[str] = replicate(lowerCamelCase )
lowerCAmelCase_ : Dict = pipeline.prepare_inputs(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = shard(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = pipeline(lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
lowerCAmelCase_ : List[Any] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 120
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """
    Constructs an ErnieM tokenizer, based on SentencePiece.
    """

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=None ,_lowerCamelCase=False ,_lowerCamelCase="utf8" ,_lowerCamelCase="[UNK]" ,_lowerCamelCase="[SEP]" ,_lowerCamelCase="[PAD]" ,_lowerCamelCase="[CLS]" ,_lowerCamelCase="[MASK]" ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> None:
'''simple docstring'''
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ ,unk_token=lowercase_ ,sep_token=lowercase_ ,pad_token=lowercase_ ,cls_token=lowercase_ ,mask_token=lowercase_ ,vocab_file=lowercase_ ,encoding=lowercase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase_ ,)
__lowercase = do_lower_case
__lowercase = sentencepiece_model_ckpt
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__lowercase = self.load_vocab(filepath=lowercase_ )
else:
__lowercase = {self.sp_model.id_to_piece(lowercase_ ): id for id in range(self.sp_model.get_piece_size() )}
__lowercase = {v: k for k, v in self.vocab.items()}
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
if text is None:
return None
__lowercase = self.tokenize(lowercase_ )
__lowercase , __lowercase = '''''', []
for i, ch in enumerate(lowercase_ ):
if ch in self.SP_CHAR_MAPPING:
__lowercase = self.SP_CHAR_MAPPING.get(lowercase_ )
else:
__lowercase = unicodedata.normalize('''NFKC''' ,lowercase_ )
if self.is_whitespace(lowercase_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(lowercase_ ) )
__lowercase , __lowercase , __lowercase = normalized_text, [], 0
if self.do_lower_case:
__lowercase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__lowercase = token[1:]
__lowercase = text[offset:].index(lowercase_ ) + offset
__lowercase = start + len(lowercase_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__lowercase = end
return token_mapping
@property
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
return len(self.vocab )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
return dict(self.vocab ,**self.added_tokens_encoder )
def __getstate__(self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__(self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(lowercase_ ,lowercase_ ) for c in text) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=False ,_lowerCamelCase=64 ,_lowerCamelCase=0.1 ) -> str:
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
__lowercase = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
__lowercase = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
__lowercase = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
__lowercase = self.sp_model.EncodeAsPieces(lowercase_ )
else:
__lowercase = self.sp_model.SampleEncodeAsPieces(lowercase_ ,lowercase_ ,lowercase_ )
__lowercase = []
for pi, piece in enumerate(lowercase_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowercase_ ) and pi != 0:
new_pieces.append(lowercase_ )
continue
else:
continue
__lowercase = 0
for i, chunk in enumerate(lowercase_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowercase_ ) or self.is_punct(lowercase_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowercase_ )
__lowercase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__lowercase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__lowercase = i
if len(lowercase_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = ''''''.join(lowercase_ ).replace(lowercase_ ,''' ''' ).strip()
return out_string
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = self.convert_ids_to_tokens(lowercase_ )
__lowercase = ''''''.join(lowercase_ ).replace(lowercase_ ,''' ''' ).strip()
return out_string
def _UpperCAmelCase (self ,_lowerCamelCase ) -> int:
'''simple docstring'''
return self.vocab.get(lowercase_ ,self.vocab.get(self.unk_token ) )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
return self.reverse_vocab.get(lowercase_ ,self.unk_token )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Tuple:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ) -> Any:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=None ,_lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowercase_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowercase_ ) + 1) + [1] * (len(lowercase_ ) + 3)
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _UpperCAmelCase (self ,_lowerCamelCase ) -> str:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowercase_ ) == 1:
__lowercase = unicodedata.category(lowercase_ )
if cat == "Zs":
return True
return False
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = {}
with io.open(lowercase_ ,'''r''' ,encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
__lowercase = line.rstrip('''\n''' )
__lowercase = int(lowercase_ )
return token_to_idx
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
__lowercase = 0
if os.path.isdir(lowercase_ ):
__lowercase = os.path.join(
lowercase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__lowercase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(lowercase_ ,'''w''' ,encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() ,key=lambda _lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
''' Please check that the vocabulary is not corrupted!''' )
__lowercase = token_index
writer.write(token + '''\n''' )
index += 1
__lowercase = os.path.join(lowercase_ ,'''sentencepiece.bpe.model''' )
with open(lowercase_ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (vocab_file,)
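# Worked example (added as an illustration, not part of the upstream file): the
# special-token layout produced by the pair-encoding methods above, on toy ids
# with cls_id = 1 and sep_id = 2. For a pair the pattern is
# [CLS] A [SEP] [SEP] B [SEP], and the token-type ids split as
# [0] * (len(A) + 1) + [1] * (len(B) + 3).
def _ernie_m_layout_sketch():
    cls_id, sep_id = 1, 2
    a, b = [5, 6], [7]
    input_ids = [cls_id] + a + [sep_id] + [sep_id] + b + [sep_id]
    token_type_ids = [0] * (len(a) + 1) + [1] * (len(b) + 3)
    assert len(input_ids) == len(token_type_ids) == 7
    return input_ids, token_type_ids  # ([1, 5, 6, 2, 2, 7, 2], [0, 0, 0, 1, 1, 1, 1])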
| 369
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
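# Migration sketch (added): the import path this shim deprecates versus the
# replacement named in the deprecation message above.
#
#   # deprecated (kept working until 0.22.0):
#   from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet import (
#       FlaxStableDiffusionControlNetPipeline,
#   )
#   # preferred:
#   from diffusers import FlaxStableDiffusionControlNetPipeline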
| 217
| 0
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = KandinskyVaaPriorPipeline
__snake_case = ['''prompt''']
__snake_case = ['''prompt''', '''negative_prompt''']
__snake_case = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
return 100
@property
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
a = PriorTransformer(**__UpperCAmelCase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a = CLIPVisionModelWithProjection(__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_image_encoder
a = self.dummy_text_encoder
a = self.dummy_tokenizer
a = self.dummy_image_processor
a = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , )
a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str=0 ) ->int:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.image_embeds
a = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
a = image[0, -10:]
a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
a = False
self._test_inference_batch_single_identical(
test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
@skip_mps
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
a = torch_device == '''cpu'''
a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''llama'''
__snake_case = ['''past_key_values''']
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any]=32_000 , __UpperCAmelCase : str=4_096 , __UpperCAmelCase : int=11_008 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Union[str, Any]="silu" , __UpperCAmelCase : Tuple=2_048 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=1e-6 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=0 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Tuple , ) ->str:
"""simple docstring"""
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = intermediate_size
a = num_hidden_layers
a = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
a = num_attention_heads
a = num_key_value_heads
a = hidden_act
a = initializer_range
a = rms_norm_eps
a = pretraining_tp
a = use_cache
a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
a = self.rope_scaling.get('''type''' , __UpperCAmelCase )
a = self.rope_scaling.get('''factor''' , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
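# Hedged usage sketch (added, not part of the upstream file): the class above
# is an obfuscated LlamaConfig. With the real transformers class, the
# `rope_scaling` contract enforced by the validation method reads as below;
# the factor value is illustrative.
if __name__ == "__main__":
    from transformers import LlamaConfig

    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    print(config.rope_scaling)
    # an unknown type such as {"type": "cubic", ...}, or a factor <= 1.0, would raise ValueError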
| 0
| 1
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase : Tuple = logging.get_logger(__name__)
class __snake_case ( lowerCAmelCase ):
_a : Optional[Any]= ["input_features", "attention_mask"]
def __init__( self ,snake_case=80 ,snake_case=16000 ,snake_case=0.0 ,snake_case=10 ,snake_case=25 ,snake_case="hamming_window" ,snake_case=32_768.0 ,snake_case=0.97 ,snake_case=1.0 ,snake_case=True ,snake_case=True ,snake_case=False ,**snake_case ,):
'''simple docstring'''
super().__init__(feature_size=snake_case ,sampling_rate=snake_case ,padding_value=snake_case ,**snake_case )
lowercase : Optional[Any] = feature_size
lowercase : List[Any] = sampling_rate
lowercase : int = padding_value
lowercase : Dict = hop_length
lowercase : List[str] = win_length
lowercase : List[Any] = frame_signal_scale
lowercase : List[Any] = preemphasis_coeff
lowercase : str = mel_floor
lowercase : int = normalize_means
lowercase : List[Any] = normalize_vars
lowercase : List[Any] = win_function
lowercase : int = return_attention_mask
lowercase : Any = win_length * sampling_rate // 1000
lowercase : Tuple = hop_length * sampling_rate // 1000
lowercase : Tuple = optimal_fft_length(self.sample_size )
lowercase : Dict = (self.n_fft // 2) + 1
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if self.win_function == "hamming_window":
lowercase : Optional[Any] = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=snake_case )
else:
lowercase : Optional[Any] = window_function(window_length=self.sample_size ,name=self.win_function )
lowercase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
lowercase : Dict = spectrogram(
one_waveform * self.frame_signal_scale ,window=snake_case ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=snake_case ,preemphasis=self.preemphasis_coeff ,mel_filters=snake_case ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if self.normalize_means:
lowercase : List[Any] = x[:input_length].mean(axis=0 )
lowercase : Dict = np.subtract(snake_case ,snake_case )
if self.normalize_vars:
lowercase : List[Any] = x[:input_length].std(axis=0 )
lowercase : List[Any] = np.divide(snake_case ,snake_case )
if input_length < x.shape[0]:
lowercase : Any = padding_value
# make sure array is in float32
lowercase : Tuple = x.astype(np.floataa )
return x
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case ,snake_case ,self.padding_value ) for x, n in zip(snake_case ,snake_case )]
def __call__( self ,snake_case ,snake_case = False ,snake_case = None ,snake_case = False ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,**snake_case ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowercase : List[Any] = isinstance(snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
lowercase : str = is_batched_numpy or (
isinstance(snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray(snake_case ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case ,np.ndarray ):
lowercase : int = np.asarray(snake_case ,dtype=np.floataa )
elif isinstance(snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : Dict = [raw_speech]
        # extract MFSC features
lowercase : Tuple = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase : Union[str, Any] = BatchFeature({"""input_features""": features} )
lowercase : Optional[int] = self.pad(
snake_case ,padding=snake_case ,max_length=snake_case ,truncation=snake_case ,pad_to_multiple_of=snake_case ,return_attention_mask=snake_case ,**snake_case ,)
# make sure list is in array format
lowercase : Tuple = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,snake_case ):
lowercase : List[Any] = [np.asarray(snake_case ,dtype=np.floataa ) for feature in input_features]
lowercase : int = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowercase : Any = [np.asarray(snake_case ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase : List[str] = (
np.array(snake_case ,dtype=np.intaa )
if self._get_padding_strategies(snake_case ,max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase : List[str] = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=snake_case )
if return_tensors is not None:
lowercase : str = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
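# Worked check (added as an illustration): the frame geometry computed in
# __init__ above with the default arguments (win_length=25 ms, hop_length=10 ms,
# sampling_rate=16 kHz). Standalone on purpose, since the class name in this
# file is obfuscated.
def _frame_geometry_sketch(win_length_ms=25, hop_length_ms=10, sampling_rate=16_000):
    sample_size = win_length_ms * sampling_rate // 1000  # 400 samples per analysis window
    sample_stride = hop_length_ms * sampling_rate // 1000  # 160 samples between windows
    # with these defaults, optimal_fft_length(400) rounds up to 512,
    # so n_freqs = 512 // 2 + 1 = 257 frequency bins per frame
    return sample_size, sample_stride  # (400, 160)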
| 285
|
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
def count_of_possible_combinations(SCREAMING_SNAKE_CASE__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
def count_of_possible_combinations_with_dp_array(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase : Any = sum(
count_of_possible_combinations_with_dp_array(target - item , SCREAMING_SNAKE_CASE__ )
for item in array )
lowercase : Optional[int] = answer
return answer
lowercase : int = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : str = [0] * (target + 1)
lowercase : Tuple = 1
for i in range(1 , target + 1 ):
for j in range(SCREAMING_SNAKE_CASE__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Any = 3
lowercase : Optional[Any] = 5
lowercase : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
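# Illustrative sketch (added): the bottom-up recurrence from the last function
# above, restated standalone so the dp table can be checked by hand.
# dp[i] counts the ordered ways to write i as a sum of elements of `array`.
def _combination_sum_sketch(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: the empty sum
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp[target]

# _combination_sum_sketch([1, 2, 5], 5) == 9, matching the demo above:
# dp = [1, 1, 2, 3, 5, 9]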
| 285
| 1
|
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A_ = 'src/diffusers'
# Matches is_xxx_available()
A_ = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
A_ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
A_ = '\n{0} = None\n'
A_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
A_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Tuple = _re_backend.findall(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(_SCREAMING_SNAKE_CASE )
def A_ ( ):
with open(os.path.join(_SCREAMING_SNAKE_CASE , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE:List[Any] = f.readlines()
# Get to the point we do the actual imports for type checking
SCREAMING_SNAKE_CASE:Union[str, Any] = 0
SCREAMING_SNAKE_CASE:str = {}
# Go through the end of the file
while line_index < len(_SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
SCREAMING_SNAKE_CASE:Union[str, Any] = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE:Union[str, Any] = []
# Until we unindent, add backend objects to the list
while line_index < len(_SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
SCREAMING_SNAKE_CASE:Union[str, Any] = lines[line_index]
SCREAMING_SNAKE_CASE:Optional[Any] = _re_single_line_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE:Optional[Any] = objects
else:
line_index += 1
return backend_specific_objects
def A_ ( snake_case , snake_case ):
if name.isupper():
return DUMMY_CONSTANT.format(_SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A_ ( snake_case=None ):
if backend_specific_objects is None:
SCREAMING_SNAKE_CASE:str = read_init()
    # For special correspondence backend -> module name as used in the function requires_modulename
SCREAMING_SNAKE_CASE:Tuple = {}
for backend, objects in backend_specific_objects.items():
SCREAMING_SNAKE_CASE:Optional[int] = "[" + ", ".join(F'''\"{b}\"''' for b in backend.split("_and_" ) ) + "]"
SCREAMING_SNAKE_CASE:Dict = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for o in objects] )
SCREAMING_SNAKE_CASE:Dict = dummy_file
return dummy_files
def A_ ( snake_case=False ):
SCREAMING_SNAKE_CASE:Any = create_dummy_files()
    # For special correspondence backend -> shortcut as used in utils/dummy_xxx_objects.py
SCREAMING_SNAKE_CASE:Dict = {"torch": "pt"}
# Locate actual dummy modules and read their content.
SCREAMING_SNAKE_CASE:Dict = os.path.join(_SCREAMING_SNAKE_CASE , "utils" )
SCREAMING_SNAKE_CASE:Union[str, Any] = {
backend: os.path.join(_SCREAMING_SNAKE_CASE , F'''dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py''' )
for backend in dummy_files.keys()
}
SCREAMING_SNAKE_CASE:str = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE:List[Any] = f.read()
else:
SCREAMING_SNAKE_CASE:List[Any] = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py as the main '''
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F'''diffusers.utils.dummy_{short_names.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '''
"to fix this." )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
A_ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
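# Illustrative output (added): what create_dummy_object emits for a class
# guarded by the torch backend, using the DUMMY_CLASS template defined above.
# "UNet2DModel" is only an example name.
#
# class UNet2DModel(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])
#
#     @classmethod
#     def from_config(cls, *args, **kwargs):
#         requires_backends(cls, ["torch"])
#
#     @classmethod
#     def from_pretrained(cls, *args, **kwargs):
#         requires_backends(cls, ["torch"])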
| 139
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
| 347
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
snake_case__ : Any = None
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : Union[str, Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
snake_case__ : str = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
snake_case__ : int = '''▁'''
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase_ :Any = VOCAB_FILES_NAMES
lowerCamelCase_ :Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ :str = AlbertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_="[CLS]" , snake_case_="[SEP]" , snake_case_="<unk>" , snake_case_="[SEP]" , snake_case_="<pad>" , snake_case_="[CLS]" , snake_case_="[MASK]" , **snake_case_ , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (
AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ , normalized=snake_case_ )
if isinstance(snake_case_ , snake_case_ )
else mask_token
)
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , **snake_case_ , )
UpperCAmelCase_ : Dict = do_lower_case
UpperCAmelCase_ : Optional[int] = remove_space
UpperCAmelCase_ : Union[str, Any] = keep_accents
UpperCAmelCase_ : Union[str, Any] = vocab_file
UpperCAmelCase_ : List[Any] = False if not self.vocab_file else True
def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
'''simple docstring'''
UpperCAmelCase_ : Any = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : Union[str, Any] = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
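# Worked example (added, toy ids): the pair layout produced by the two methods
# above, with cls_id = 1 and sep_id = 2.
def _albert_layout_sketch():
    cls_id, sep_id = 1, 2
    a, b = [5, 6], [7]
    input_ids = [cls_id] + a + [sep_id] + b + [sep_id]        # [CLS] A [SEP] B [SEP]
    token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)  # [0, 0, 0, 0, 1, 1]
    assert len(input_ids) == len(token_type_ids) == 6
    return input_ids, token_type_ids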
| 274
|
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
snake_case__ : str = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case__ : int = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case__ : Tuple = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=1 , snake_case_="binary" , snake_case_=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = fa_score(
snake_case_ , snake_case_ , labels=snake_case_ , pos_label=snake_case_ , average=snake_case_ , sample_weight=snake_case_ )
return {"f1": float(snake_case_ ) if score.size == 1 else score}
| 274
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "audio-spectrogram-transformer"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-1_2 , _a=16 , _a=True , _a=10 , _a=10 , _a=1_024 , _a=128 , **_a , ):
"""simple docstring"""
super().__init__(**_a )
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = initializer_range
lowerCamelCase = layer_norm_eps
lowerCamelCase = patch_size
lowerCamelCase = qkv_bias
lowerCamelCase = frequency_stride
lowerCamelCase = time_stride
lowerCamelCase = max_length
lowerCamelCase = num_mel_bins
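# Hedged usage sketch (added, not part of the upstream file): the class above
# is an obfuscated ASTConfig. With the real transformers class the defaults
# set here resolve as below; values mirror the __init__ signature, not a
# downloaded checkpoint.
if __name__ == "__main__":
    from transformers import ASTConfig

    config = ASTConfig()
    print(config.num_mel_bins, config.max_length, config.patch_size)  # 128 1024 16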
| 291
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = tempfile.mkdtemp()
# fmt: off
lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
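# --- Added usage sketch (not in the original; checkpoint names and `pil_image`
# are illustrative) ---
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")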
| 291
| 1
|
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 341
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
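# --- Added illustration (hypothetical key; not part of the original script) ---
# renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"]) returns
#     [{"old": "input_blocks.1.0.in_layers.0.weight",
#       "new": "input_blocks.1.0.norm1.weight"}]
# Only the LDM-style suffixes are rewritten here; the block prefix is remapped
# later through `additional_replacements` in assign_to_checkpoint.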
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 341
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
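# --- Added usage sketch (not in the original) ---
#   config = OpenAIGPTConfig()                    # defaults: vocab_size=40478, n_embd=768, ...
#   config = OpenAIGPTConfig(n_layer=6, n_head=8) # override any field by keyword
#   print(config.num_hidden_layers)               # 6, resolved via attribute_map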
| 58
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value) -> None:
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
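# --- Added usage sketch (not in the original) ---
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("Hello world", return_offsets_mapping=True)
#   print(enc["input_ids"], enc["offset_mapping"])
#   # pretokenized input requires add_prefix_space=True (see the asserts above):
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
#   enc = tok(["Hello", "world"], is_split_into_words=True)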
| 217
| 0
|
"""simple docstring"""
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 362
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
'''simple docstring'''
    def test_gelu_versions(self) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self) -> None:
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self) -> None:
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
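# --- Added note (not in the original) ---
# "gelu_10" is a clipped variant of GELU: it matches plain GELU wherever the
# output stays below the clip value and saturates at 10.0 above it, which is
# exactly what test_gelu_10 checks with the masked allclose and max() asserts.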
| 68
| 0
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    '''simple docstring'''
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ) -> float:
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    '''simple docstring'''
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    '''simple docstring'''
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
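# --- Added usage sketch (not in the original) ---
#   pi_estimator(100_000)                               # Monte Carlo estimate of pi
#   area_under_line_estimator_check(100_000, 0.0, 5.0)  # integrates y = x
#   pi_estimator_using_area_under_curve(100_000)        # quarter-circle integral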
| 285
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    '''simple docstring'''
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    '''simple docstring'''
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
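# --- Added example invocation (hypothetical script name/paths; not in the
# original) ---
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted-resnet50
# Note: `--push_to_hub` is declared with type=bool, and argparse converts any
# non-empty string (including "False") to True; with default=True, the only
# way to disable pushing from the CLI is to pass an empty string: --push_to_hub "".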
| 285
| 1
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    '''simple docstring'''
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    '''simple docstring'''
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
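# Added note: the expected answer for max_perimeter=1000 is 840. For example,
# p=120 already admits three right triangles: (20, 48, 52), (24, 45, 51) and
# (30, 40, 50), while 840 has the most solutions of any p <= 1000.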
| 371
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    '''Loads a vocabulary file into a dictionary.'''
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece")
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
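# --- Added usage sketch (hypothetical local model file; not in the original) ---
#   tok = XLMProphetNetTokenizer("prophetnet.tokenizer")
#   enc = tok("Hello world")
#   print(tok.convert_ids_to_tokens(enc["input_ids"]))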
| 97
| 0
|
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None, ) -> None:
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        """simple docstring"""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
    def __lt__(self, other) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    '''simple docstring'''
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        """simple docstring"""
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
A : Any = (0, 0)
A : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
A : Any = GreedyBestFirst(init, goal)
A : List[str] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
A : Union[str, Any] = 2
for elem in grid:
print(elem)
| 274
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 274
| 1
|
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
    def _download_and_prepare(self, dl_manager):
        '''simple docstring'''
        import nltk
        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
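# --- Added usage note (not in the original) ---
# With this lazy-module pattern, importing the package stays cheap: heavy
# submodules (e.g. modeling_llama, which pulls in torch) are only imported the
# first time one of their attributes, such as `LlamaForCausalLM`, is accessed.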
| 78
| 0
|
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x, point_y, incoming_gradient):
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord=1.4, first_y_coord=-9.6):
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 341
|
'''simple docstring'''
class Node:
    '''simple docstring'''
    def __init__(self, data, previous=None, next_node=None) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__(self) -> str:
        return f"""{self.data}"""
    def get_data(self):
        return self.data
    def get_next(self):
        return self.next
    def get_previous(self):
        return self.previous
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase ) -> List[str]:
_snake_case = head
def __iter__(self ) -> Optional[Any]:
return self
def lowercase (self ) -> str:
if not self.current:
raise StopIteration
else:
_snake_case = self.current.get_data()
_snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self ) -> Optional[int]:
_snake_case = None # First node in list
_snake_case = None # Last node in list
def __str__(self ) -> Optional[int]:
_snake_case = self.head
_snake_case = []
while current is not None:
nodes.append(current.get_data() )
_snake_case = current.get_next()
return " ".join(str(UpperCAmelCase ) for node in nodes )
def __contains__(self , UpperCAmelCase ) -> int:
_snake_case = self.head
while current:
if current.get_data() == value:
return True
_snake_case = current.get_next()
return False
def __iter__(self ) -> Union[str, Any]:
return LinkedListIterator(self.head )
def lowercase (self ) -> str:
if self.head:
return self.head.get_data()
return None
def lowercase (self ) -> List[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowercase (self , UpperCAmelCase ) -> None:
if self.head is None:
_snake_case = node
_snake_case = node
else:
self.insert_before_node(self.head , UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> None:
if self.head is None:
self.set_head(UpperCAmelCase )
else:
self.insert_after_node(self.tail , UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> None:
_snake_case = Node(UpperCAmelCase )
if self.head is None:
self.set_head(UpperCAmelCase )
else:
self.set_tail(UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None:
_snake_case = node
_snake_case = node.previous
if node.get_previous() is None:
_snake_case = node_to_insert
else:
_snake_case = node_to_insert
_snake_case = node_to_insert
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None:
_snake_case = node
_snake_case = node.next
if node.get_next() is None:
_snake_case = node_to_insert
else:
_snake_case = node_to_insert
_snake_case = node_to_insert
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> None:
_snake_case = 1
_snake_case = Node(UpperCAmelCase )
_snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCAmelCase , UpperCAmelCase )
return
current_position += 1
_snake_case = node.next
self.insert_after_node(self.tail , UpperCAmelCase )
def lowercase (self , UpperCAmelCase ) -> Node:
_snake_case = self.head
while node:
if node.get_data() == item:
return node
_snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowercase (self , UpperCAmelCase ) -> Optional[int]:
if (node := self.get_node(UpperCAmelCase )) is not None:
if node == self.head:
_snake_case = self.head.get_next()
if node == self.tail:
_snake_case = self.tail.get_previous()
self.remove_node_pointers(UpperCAmelCase )
@staticmethod
def lowercase (UpperCAmelCase ) -> None:
if node.get_next():
_snake_case = node.previous
if node.get_previous():
_snake_case = node.next
_snake_case = None
_snake_case = None
def lowercase (self ) -> Dict:
return self.head is None
def __SCREAMING_SNAKE_CASE ( ):
pass
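# Short usage sketch (an editorial addition; the names match the classes above):
def example_usage() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)  # append: 1 2 3
    linked_list.insert_at_position(position=2, value=9)  # 1 9 2 3
    assert str(linked_list) == "1 9 2 3"
    linked_list.delete_value(9)
    assert str(linked_list) == "1 2 3"
    assert 2 in linked_list and 9 not in linked_list
    assert list(linked_list) == [1, 2, 3]  # __iter__ via LinkedListIterator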
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
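# Standalone usage sketch (an editorial addition; it assumes the `diffusers`
# package and the public "harmonai/maestro-150k" checkpoint used above):
#
#     import torch
#     from diffusers import DanceDiffusionPipeline
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#     audios = pipe(generator=torch.manual_seed(0), audio_length_in_s=4.096).audios
#     # `audios` is a numpy array of shape (batch, channels, samples)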
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]] = None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
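# Usage sketch (an editorial addition): `run_command` is the simple synchronous
# helper, while `execute_subprocess_async` streams stdout/stderr live, which is
# what long-running distributed launches rely on.
#
#     run_command(["python", "-c", "print('hello')"])
#     out = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#     result = execute_subprocess_async(["python", "-c", "print('hello')"], env=os.environ.copy())
#     print(result.returncode, result.stdout)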
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
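# Roundtrip sketch (an editorial addition): [[2, 5], [1, 6]] has determinant 7,
# which is coprime with 36, so it is a valid key; decrypting the ciphertext
# recovers the processed (upper-cased, padded) plaintext.
def _demo_roundtrip() -> None:
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    message = "Attack at dawn"
    encrypted = cipher.encrypt(message)
    assert cipher.decrypt(encrypted) == cipher.process_text(message)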
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
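# Minimal usage sketch (an editorial addition): instantiate the default
# configuration and override a couple of the XPath-specific sizes.
#
#     config = MarkupLMConfig(max_depth=64, max_xpath_tag_unit_embeddings=512)
#     assert config.model_type == "markuplm"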
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
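# Production-style sketch of the iterator pattern exercised above (an editorial
# addition; it assumes the public "distilgpt2" checkpoint is available):
#
#     tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
#     model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#     inputs = tokenizer(["The meaning of life is"], return_tensors="pt")
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#     for chunk in streamer:
#         print(chunk, end="", flush=True)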
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # compute the expected (height, width) after the shortest-edge resize
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
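# Standalone sketch of the shortest-edge resizing rule that get_expected_values
# mirrors (an editorial addition; the helper name is invented for illustration):
def expected_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    # scale so the shorter side equals `shortest_edge`, preserving aspect ratio
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge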
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
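# Usage sketch (an editorial addition; the path is illustrative): the script
# expects a token with repo scope in the environment and normally runs from a
# scheduled CI job.
#
#     GITHUB_TOKEN=ghp_xxx python utils/stale.py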
if __name__ == "__main__":
main()
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=True)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
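# For example (illustrative, not part of the original file): on a machine with 2+ GPUs,
# get_launcher(distributed=True) returns ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"].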
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_lowerCamelCase : Dict = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
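# A minimal usage sketch for the reader tokenizer above (illustrative inputs; assumes the
# "facebook/dpr-reader-single-nq-base" checkpoint is reachable):
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by Haddaway.",
#         return_tensors="pt",
#     )
#     # encoded["input_ids"] has shape (n_passages, sequence_length)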
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
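# Quick sanity check (not in the original file): downscale_height_and_width(768, 768, 8)
# returns (96, 96), i.e. the latent resolution for a 768x768 image with a movq scale factor of 8.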
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
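# For example (illustrative): prepare_image(Image.open("frog.png"), 768, 768) yields a float32
# tensor of shape (1, 3, 768, 768) with values scaled to [-1, 1].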
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ decoder to generate the image from the latents.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's
        module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
class EditDistance:
    """Computes the minimum edit (Levenshtein) distance between two strings."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
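# Quick sanity check (not part of the original script):
# EditDistance().min_dist_bottom_up("kitten", "sitting") == 3
# ("kitten" -> "sitten" -> "sittin" -> "sitting")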
if __name__ == "__main__":
_lowercase : int = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
_lowercase : List[str] = input("Enter the first string: ").strip()
_lowercase : Union[str, Any] = input("Enter the second string: ").strip()
print()
print(f'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}')
print(f'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
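# For illustration (hypothetical checkpoint key, traced through rename_key above):
#   "model.transformer_0.mha.W_q.weight" -> "yoso.encoder.layer.0.attention.self.query.weight"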
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : Optional[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
'''simple docstring'''
def average_absolute_deviation(nums: list) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
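# For example: average_absolute_deviation([1, 2, 3, 4]) == 1.0
# (average is 2.5; deviations 1.5 + 0.5 + 0.5 + 1.5 = 4.0; 4.0 / 4 = 1.0)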
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
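# A minimal usage sketch of the decorator exercised above (assumed call pattern, mirroring the tests):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size):
#         ...  # re-invoked with 128, 64, 32, ... after each CUDA OOM
#
#     training_loop()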
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : int , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] =True
_lowerCamelCase : Tuple =True
_lowerCamelCase : str =LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
_lowerCamelCase : int =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
_lowerCamelCase : Any =outputs.past_key_values
# create hypothetical multiple next tokens and extend them to next_input_ids
_lowerCamelCase : Union[str, Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCamelCase : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase : Optional[Any] =torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase : Optional[int] =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
_lowerCamelCase : Tuple =model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
_lowerCamelCase : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase : Optional[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : Optional[int] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Optional[Any] =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase__ : Union[str, Any] =(LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ : List[Any] =(
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : Tuple =False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37)
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Dict ) -> str:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def lowerCamelCase ( self , scaling_type ):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5))
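    # Background on the two strategies tested above: "linear" scaling divides position
    # indices by `factor` before computing rotary embeddings, so even short inputs differ
    # from the unscaled model; "dynamic" (NTK-aware) scaling only adjusts the rotary base
    # once the input exceeds the original max_position_embeddings, so short inputs match.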
@require_torch
class A ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_lowerCamelCase : int =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowerCamelCase : int =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Dict =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : int ) -> Any:
"""simple docstring"""
_lowerCamelCase : Tuple =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Optional[Any] =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_lowerCamelCase : List[Any] =model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_lowerCamelCase : str =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : List[str] =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : str =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_lowerCamelCase : int =model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_lowerCamelCase : str =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Union[str, Any] =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
_lowerCamelCase : Any =LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_lowerCamelCase : Optional[Any] =model(torch.tensor(lowercase_ ) )
_lowerCamelCase : Optional[int] =torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
_lowerCamelCase : int =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Tuple ='Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_lowerCamelCase : Union[str, Any] ='Simply put, the theory of relativity states that '
_lowerCamelCase : int =LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_lowerCamelCase : str =tokenizer.encode(lowercase_ , return_tensors='pt' )
_lowerCamelCase : List[Any] =LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ )
# greedy generation outputs
_lowerCamelCase : str =model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
_lowerCamelCase : Tuple =tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # Count case-insensitive occurrences of `term` in `document`, ignoring punctuation.
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    # Return (number of documents containing `term`, total number of documents),
    # where documents are newline-separated entries in `corpus`.
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
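# Minimal usage sketch (values computed from the definitions above):
#   tf = term_frequency("document", "This is the first document.")          # -> 1
#   df, n = document_frequency("document", "first document\nsecond line")   # -> (1, 2)
#   inverse_document_frequency(df, n)                                       # -> 0.301
#   tf_idf(1, 0.301)                                                        # -> 0.301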
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """junnyu/roformer_chinese_small""": 1536,
    """junnyu/roformer_chinese_base""": 1536,
    """junnyu/roformer_chinese_char_small""": 512,
    """junnyu/roformer_chinese_char_base""": 512,
    """junnyu/roformer_small_discriminator""": 128,
    """junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase' , do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        # Saving requires the serializable BertPreTokenizer; the custom Jieba pre-tokenizer is restored on load.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs)
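# Usage sketch (checkpoint name taken from the pretrained map above; output is illustrative):
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # pre-tokenized with jieba via JiebaPreTokenizer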
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
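# Example invocation (the script filename and all paths are placeholders):
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output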
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
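    # Why slice comparison: with a fixed seed on CPU the pipeline is deterministic, so
    # checking a 3x3 corner slice of the output against hard-coded reference values with
    # a 1e-2 tolerance catches real regressions while absorbing minor floating-point
    # drift across library versions.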
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    """simple docstring"""
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    """simple docstring"""
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize: below 0.5 is kept (0), at or above 0.5 is inpainted (1)
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    '''simple docstring'''
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps = 250,
        eta = 0.0,
        jump_length = 10,
        jump_n_samples = 10,
        generator = None,
        output_type = "pil",
        return_dict = True,
    ):
        """simple docstring"""
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_samples, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
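# Usage sketch (checkpoint name and image loading are illustrative; RePaint is typically
# run with a DDPM UNet such as "google/ddpm-ema-celebahq-256"):
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#                 eta=0.0, jump_length=10, jump_n_samples=10).images[0]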
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
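# Example: signature("spot") == signature("tops") == "opst", so anagram("spot") returns
# every word in words.txt sharing that signature (the exact list depends on the word file),
# e.g. ["opts", "post", "pots", "spot", "stop", "tops"].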
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
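    # Background on the 256-sized slices: conditional DETR uses hidden size 256, and the
    # original checkpoint stores the q/k/v projections stacked in a single 768x256
    # in_proj matrix, so rows 0:256, 256:512 and 512:768 hold the query, key and value
    # weights respectively.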
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
"""simple docstring"""
A__ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A__ = '''resnet101'''
if "dc5" in model_name:
A__ = True
A__ = '''panoptic''' in model_name
if is_panoptic:
A__ = 250
else:
A__ = 91
A__ = '''huggingface/label-files'''
A__ = '''coco-detection-id2label.json'''
A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
A__ = {int(lowercase_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load image processor
A__ = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
A__ = ConditionalDetrImageProcessor(format=lowercase_ )
# prepare image
A__ = prepare_img()
A__ = image_processor(images=lowercase_ , return_tensors='''pt''' )
A__ = encoding['''pixel_values''']
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
A__ = torch.hub.load('''DeppMeng/ConditionalDETR''' , lowercase_ , pretrained=lowercase_ ).eval()
A__ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A__ = '''conditional_detr.''' + src
rename_key(lowercase_ , lowercase_ , lowercase_ )
A__ = rename_backbone_keys(lowercase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase_ , is_panoptic=lowercase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A__ = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
A__ = state_dict.pop(lowercase_ )
A__ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A__ = state_dict.pop(lowercase_ )
A__ = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
A__ = state_dict.pop(lowercase_ )
A__ = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A__ = state_dict.pop(lowercase_ )
A__ = val
# finally, create HuggingFace model and load state dict
A__ = ConditionalDetrForSegmentation(lowercase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
model.push_to_hub(repo_id=lowercase_ , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
A__ = conditional_detr(lowercase_ )
A__ = model(lowercase_ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    '''simple docstring'''
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1) , a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1) , a_min=eps)).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T)
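# Note: despite the "distance" in its name, this helper returns the pairwise cosine
# *similarity* matrix (shape [len(emb_1), len(emb_2)]): rows are L2-normalized, with
# norms clipped at eps to avoid division by zero, before the matrix product.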
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    '''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
lowerCAmelCase = self.visual_projection(__lowerCAmelCase)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
# Use a lower threshold if an image has any special care concept
lowerCAmelCase = is_special_care * 0.01
lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config , input_shape = None , seed = 0 , dtype = jnp.float32 , _do_init = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs)
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init)
    def init_weights( self , rng , input_shape , params = None):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape)
        params_rng , dropout_rng = jax.random.split(rng)
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        random_params = self.module.init(rngs , clip_input)["""params"""]
        return random_params
    def __call__( self , clip_input , params = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1))
        return self.module.apply(
            {"""params""": params or self.params} , jnp.array(clip_input , dtype=jnp.float32) , rngs={} , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( vector ):
    # tanh expressed via the logistic identity: (2 / (1 + e^(-2x))) - 1 == tanh(x)
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
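# --- Illustrative check (a sketch, not part of the original module) ---
# The expression above is the logistic-function form of tanh:
# (2 / (1 + exp(-2x))) - 1 == tanh(x).
if __name__ == "__main__":
    _xs = np.linspace(-3.0, 3.0, 7)
    assert np.allclose((2 / (1 + np.exp(-2 * _xs))) - 1, np.tanh(_xs))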
| 316
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a ( _lowerCamelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
@property
def A_ ( self : List[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A_ ( self : Union[str, Any] ):
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def A_ ( self : Tuple ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = '''A red cat sitting on a park bench'''
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A_ ( self : Tuple ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case_ = '''A red cat sitting on a park bench'''
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
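# --- Illustrative sketch (arrays below are made up, not pipeline outputs) ---
# Both tests above validate a 3x3 pixel slice against reference values using a
# max-absolute-difference tolerance of 1e-3; the same check in isolation:
def _demo_slice_check():
    got = np.array([0.2514, 0.3007, 0.3517])
    ref = np.array([0.2515, 0.3006, 0.3518])
    return np.abs(got.flatten() - ref).max() < 1e-3  # True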
| 56
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowercase : int =logging.getLogger(__name__)
@dataclass
class snake_case__ :
"""simple docstring"""
__lowerCAmelCase :Optional[str] = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__lowerCAmelCase :Optional[str] = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__lowerCAmelCase :int = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCAmelCase :bool = field(
default=A__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__lowerCAmelCase :bool = field(
default=A__ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__lowerCAmelCase :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__lowerCAmelCase :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__lowerCAmelCase :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__lowerCAmelCase :Optional[str] = field(
default=A__ , metadata={"help": "A csv or a json file containing the training data."} )
__lowerCAmelCase :Optional[str] = field(
default=A__ , metadata={"help": "A csv or a json file containing the validation data."} )
__lowerCAmelCase :Optional[str] = field(default=A__ , metadata={"help": "A csv or a json file containing the test data."} )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
a__ : Dict = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
a__ : List[str] = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class snake_case__ :
"""simple docstring"""
__lowerCAmelCase :str = field(
default=A__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCAmelCase :Optional[str] = field(
default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCAmelCase :Optional[str] = field(
default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCAmelCase :Optional[str] = field(
default=A__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__lowerCAmelCase :bool = field(
default=A__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__lowerCAmelCase :str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__lowerCAmelCase :bool = field(
default=A__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a__ , a__ , a__ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
a__ , a__ , a__ : Union[str, Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout)] , )
a__ : Dict = training_args.get_process_log_level()
logger.setLevel(_lowercase)
datasets.utils.logging.set_verbosity(_lowercase)
transformers.utils.logging.set_verbosity(_lowercase)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Detecting last checkpoint.
a__ : Any = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
a__ : str = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""")
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
a__ : Tuple = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
a__ : Tuple = data_args.train_file.split(""".""")[-1]
a__ : Any = data_args.test_file.split(""".""")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
a__ : int = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""")
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''')
if data_args.train_file.endswith(""".csv"""):
# Loading a dataset from local csv files
a__ : int = load_dataset("""csv""" , data_files=_lowercase , cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
a__ : Dict = load_dataset("""json""" , data_files=_lowercase , cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
a__ : int = raw_datasets["""train"""].features["""label"""].names
a__ : Any = len(_lowercase)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
a__ : List[Any] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowercase , )
a__ : Dict = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
a__ : List[Any] = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
a__ : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
a__ : Union[str, Any] = {"""Refused""": 0, """Entailed""": 1}
a__ : Dict = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''')
a__ : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length)
def preprocess_tabfact_function(_lowercase : Tuple):
# Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            # rows are newline-separated, cells '#'-separated; row 0 holds the column names
            _table_content = [_table_row.split("""#""") for _table_row in _table_text.strip("""\n""").split("""\n""")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0])
            return _table_pd
a__ : str = examples["""statement"""]
a__ : Union[str, Any] = list(map(_convert_table_text_to_pandas , examples["""table_text"""]))
a__ : Tuple = tokenizer(_lowercase , _lowercase , padding=_lowercase , max_length=_lowercase , truncation=_lowercase)
a__ : int = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing"""):
a__ : List[str] = raw_datasets.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""")
a__ : Optional[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
a__ : str = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""")
a__ : List[str] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
a__ : List[str] = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""")
a__ : Any = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
a__ : Dict = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_lowercase)) , 3):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions , tuple) else p.predictions
        preds = np.argmax(preds , axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
a__ : Dict = default_data_collator
elif training_args.fpaa:
a__ : Union[str, Any] = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8)
else:
a__ : int = None
# Initialize our Trainer
a__ : List[str] = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
a__ : List[Any] = None
if training_args.resume_from_checkpoint is not None:
a__ : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a__ : Dict = last_checkpoint
a__ : Dict = trainer.train(resume_from_checkpoint=_lowercase)
a__ : int = train_result.metrics
a__ : Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase)
)
a__ : int = min(_lowercase , len(_lowercase))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , _lowercase)
trainer.save_metrics("""train""" , _lowercase)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""")
a__ : List[str] = trainer.evaluate(eval_dataset=_lowercase)
a__ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase)
a__ : Dict = min(_lowercase , len(_lowercase))
trainer.log_metrics("""eval""" , _lowercase)
trainer.save_metrics("""eval""" , _lowercase)
if training_args.do_predict:
logger.info("""*** Predict ***""")
# Removing the `label` columns because it contains -1 and Trainer won't like that.
a__ : Any = predict_dataset.remove_columns("""label""")
a__ : Optional[Any] = trainer.predict(_lowercase , metric_key_prefix="""predict""").predictions
a__ : Any = np.argmax(_lowercase , axis=1)
a__ : List[str] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""")
if trainer.is_world_process_zero():
with open(_lowercase , """w""") as writer:
logger.info("""***** Predict Results *****""")
writer.write("""index\tprediction\n""")
for index, item in enumerate(_lowercase):
a__ : int = label_list[item]
writer.write(F'''{index}\t{item}\n''')
a__ : Tuple = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase)
else:
trainer.create_model_card(**_lowercase)
def lowerCAmelCase_ ( _lowercase : Any) -> Union[str, Any]:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
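def _demo_table_text_to_pandas():
    # --- Illustrative sketch (not called anywhere; the sample string is made up) ---
    # The `table_text` fields consumed above encode a table with '#'-separated
    # cells and newline-separated rows, the first row being the column names:
    _table_text = "year#city\n2020#Tokyo\n2016#Rio"
    _table_content = [row.split("#") for row in _table_text.strip("\n").split("\n")]
    # -> DataFrame with columns ["year", "city"] and two data rows
    return pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])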
| 170
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
UpperCAmelCase = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_snake_case )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3 ) )
@slow
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
UpperCAmelCase = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase = model(_snake_case )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3 ) )
| 152
|
def print_max_activities ( start: list[int] , finish: list[int] ):
    '''simple docstring'''
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
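# --- Illustrative note ---
# Greedy argument: with activities pre-sorted by finish time, repeatedly taking
# the next activity whose start is >= the finish of the last selected one
# yields a maximum-size compatible set. For the sample data above the selected
# indices are 0, 1, 3, 4.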
| 152
| 1
|
'''simple docstring'''
import argparse
import os
import re
a__ : Any = 'src/diffusers'
# Pattern that looks at the indentation in a line.
a__ : Any = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
a__ : List[Any] = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a__ : Dict = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
a__ : Union[str, Any] = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a__ : Dict = re.compile(R'\[([^\]]+)\]')
def _UpperCamelCase ( __A ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = _re_indent.search(__A )
return "" if search is None else search.groups()[0]
def _UpperCamelCase ( __A , __A="" , __A=None , __A=None ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__A ):
index += 1
UpperCamelCase__ = ["\n".join(lines[:index] )]
else:
UpperCamelCase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase__ = [lines[index]]
index += 1
while index < len(__A ) and (end_prompt is None or not lines[index].startswith(__A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__A ) )
if index < len(__A ) - 1:
UpperCamelCase__ = [lines[index + 1]]
index += 1
else:
UpperCamelCase__ = []
else:
blocks.append("\n".join(__A ) )
UpperCamelCase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__A ) > 0:
blocks.append("\n".join(__A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__A ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def _UpperCamelCase ( key ) -> Optional[Any]:
    '''simple docstring'''
    def _inner(__A ):
        return key(__A ).lower().replace("_" , "" )
    return _inner
def _UpperCamelCase ( objects , key=None ) -> Optional[Any]:
    '''simple docstring'''
    def noop(__A ):
        return __A
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    ignore_key = ignore_underscore(key )
    return sorted(constants , key=ignore_key ) + sorted(classes , key=ignore_key ) + sorted(functions , key=ignore_key )
def _UpperCamelCase ( __A ) -> Optional[int]:
'''simple docstring'''
def _replace(__A ):
UpperCamelCase__ = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
UpperCamelCase__ = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(__A )] ) + "]"
UpperCamelCase__ = import_statement.split("\n" )
if len(__A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase__ = 2 if lines[1].strip() == "[" else 1
UpperCamelCase__ = [(i, _re_strip_line.search(__A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase__ = sort_objects(__A , key=lambda __A : x[1] )
UpperCamelCase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
UpperCamelCase__ = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ = keys[:-1]
UpperCamelCase__ = get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in sort_objects(__A )] )
return "\n".join(__A )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase__ = _re_bracket_content.sub(_replace , __A )
return import_statement
def _UpperCamelCase ( __A , __A=True ) -> Optional[int]:
'''simple docstring'''
with open(__A , "r" ) as f:
UpperCamelCase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCamelCase__ = split_code_in_indented_blocks(
__A , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCamelCase__ = main_blocks[block_idx]
UpperCamelCase__ = block.split("\n" )
# Get to the start of the imports.
UpperCamelCase__ = 0
while line_idx < len(__A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCamelCase__ = len(__A )
else:
line_idx += 1
if line_idx >= len(__A ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCamelCase__ = "\n".join(block_lines[line_idx:-1] )
UpperCamelCase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
UpperCamelCase__ = split_code_in_indented_blocks(__A , indent_level=__A )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCamelCase__ = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCamelCase__ = [(pattern.search(__A ).groups()[0] if pattern.search(__A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCamelCase__ = [(i, key) for i, key in enumerate(__A ) if key is not None]
UpperCamelCase__ = [x[0] for x in sorted(__A , key=lambda __A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCamelCase__ = 0
UpperCamelCase__ = []
for i in range(len(__A ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCamelCase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__A )
count += 1
# And we put our main block back together with its first and last line.
UpperCamelCase__ = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__A ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(__A , "w" ) as f:
f.write("\n".join(__A ) )
def _UpperCamelCase ( __A=True ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = []
for root, _, files in os.walk(__A ):
if "__init__.py" in files:
UpperCamelCase__ = sort_imports(os.path.join(__A , "__init__.py" ) , check_only=__A )
if result:
UpperCamelCase__ = [os.path.join(__A , "__init__.py" )]
if len(__A ) > 0:
raise ValueError(F'''Would overwrite {len(__A )} files, run `make style`.''' )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
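# --- Illustrative sketch (standalone; the names below are made up) ---
# The ordering enforced by the sorter above: ALL_CAPS constants first, CapWords
# classes second, lowercase functions last, each bucket sorted
# case-insensitively with underscores ignored.
def _demo_bucket_sort():
    objs = ["load_tool", "Agent", "OPENAI_KEY", "BASE_URL", "Pipeline", "chat"]
    def key(s):
        return s.lower().replace("_", "")
    constants = sorted([o for o in objs if o.isupper()], key=key)
    classes = sorted([o for o in objs if o[0].isupper() and not o.isupper()], key=key)
    functions = sorted([o for o in objs if not o[0].isupper()], key=key)
    # -> ['BASE_URL', 'OPENAI_KEY', 'Agent', 'Pipeline', 'chat', 'load_tool']
    return constants + classes + functions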
| 80
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a__ : Optional[List[str]] = None
a__ : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a__ : Any = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_ :
__UpperCAmelCase = True
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "PIL.Image.Image"
__UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ )
def __call__( self ):
return self.pa_type
def __a ( self , a ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a , a ):
UpperCamelCase__ = np.array(a )
if isinstance(a , a ):
return {"path": value, "bytes": None}
elif isinstance(a , a ):
return {"path": None, "bytes": value}
elif isinstance(a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a )
elif isinstance(a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __a ( self , a , a=None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(a ):
UpperCamelCase__ = PIL.Image.open(a )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a )
except ValueError:
UpperCamelCase__ = None
with xopen(a , "rb" , use_auth_token=a ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __a ( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def __a ( self , a ):
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def __a ( self , a ):
@no_op_if_value_is_null
def path_to_bytes(a ):
with xopen(a , "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCamelCase__ = pa.array(
[os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def _UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase ( __A ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__A , format=__A )
return buffer.getvalue()
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if hasattr(__A , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(__A )
UpperCamelCase__ = np.dtype(__A )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(__A ) )
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( __A ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(__A )
if isinstance(__A , __A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
elif isinstance(__A , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
else:
return objs
else:
return objs
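# --- Illustrative sketch (standalone; the valid-dtype set is abbreviated) ---
# The downcast loop in the np-array encoder above halves the dtype itemsize
# until it reaches a dtype Pillow can round-trip; e.g. int64 falls back to int32:
def _demo_dtype_downcast():
    valid = {np.dtype("<i4"), np.dtype("<i2")}  # abbreviated, little-endian only
    itemsize = np.dtype("<i8").itemsize  # start from int64 (8 bytes)
    while itemsize >= 1:
        candidate = np.dtype("<i" + str(itemsize))
        if candidate in valid:
            return candidate  # np.dtype('<i4'), i.e. int32
        itemsize //= 2
    return None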
| 80
| 1
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class a :
def __init__( self , __magic_name__ = None ) -> int:
_a = value
_a = None # Added in order to delete a node easier
_a = None
_a = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class a :
def __init__( self , __magic_name__ = None ) -> Tuple:
_a = root
def __str__( self ) -> str:
return str(self.root )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
if new_children is not None: # reset its kids
_a = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
_a = new_children
else:
_a = new_children
else:
_a = new_children
def __UpperCAmelCase ( self , __magic_name__ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCAmelCase ( self ) -> bool:
return self.root is None
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
_a = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
_a = new_node # set its root
else: # Tree is not empty
_a = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_a = new_node # We insert the new node in a leaf
break
else:
_a = parent_node.left
else:
if parent_node.right is None:
_a = new_node
break
else:
_a = parent_node.right
_a = parent_node
def __UpperCAmelCase ( self , *__magic_name__ ) -> None:
for value in values:
self.__insert(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> Node | None:
if self.empty():
            raise IndexError('Warning: Tree is empty! Please insert values before searching.' )
else:
_a = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_a = node.left if value < node.value else node.right
return node
def __UpperCAmelCase ( self , __magic_name__ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_a = self.root
if not self.empty():
while node.right is not None:
_a = node.right
return node
def __UpperCAmelCase ( self , __magic_name__ = None ) -> Node | None:
if node is None:
_a = self.root
if self.root is None:
return None
if not self.empty():
_a = self.root
while node.left is not None:
_a = node.left
return node
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
_a = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
_a = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_a = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCAmelCase ( self , __magic_name__ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __UpperCAmelCase ( self , __magic_name__=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> int:
_a = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder (curr_node :Node | None ) -> list[Node]:
    '''simple docstring'''
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def _A () -> None:
'''simple docstring'''
_a = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_a = BinarySearchTree()
for i in testlist:
t.insert(lowerCAmelCase__ )
# Prints all the elements of the list in order traversal
print(lowerCAmelCase__ )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowerCAmelCase__ )
print(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
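# --- Illustrative note ---
# The k-th smallest helper above exploits that an in-order traversal of a BST
# visits values in ascending order; for the tree built from
# (8, 3, 6, 1, 10, 14, 13, 4, 7) the in-order sequence is
# 1, 3, 4, 6, 7, 8, 10, 13, 14, so k = 3 returns 4.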
| 104
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : str = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """git_vision_model"""
def __init__( self , __magic_name__=7_68 , __magic_name__=30_72 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=2_24 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.0_2 , **__magic_name__ , ) -> Union[str, Any]:
super().__init__(**__magic_name__ )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = num_channels
_a = patch_size
_a = image_size
_a = initializer_range
_a = attention_dropout
_a = layer_norm_eps
_a = hidden_act
@classmethod
def __UpperCAmelCase ( cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__magic_name__ )
_a , _a = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
_a = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """git"""
def __init__( self , __magic_name__=None , __magic_name__=3_05_22 , __magic_name__=7_68 , __magic_name__=6 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10_24 , __magic_name__=0.0_2 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=False , __magic_name__=1_01 , __magic_name__=1_02 , __magic_name__=None , **__magic_name__ , ) -> Optional[int]:
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , pad_token_id=__magic_name__ , **__magic_name__ )
if vision_config is None:
_a = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
_a = GitVisionConfig(**__magic_name__ )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = tie_word_embeddings
_a = num_image_with_embedding
_a = bos_token_id
_a = eos_token_id
def __UpperCAmelCase ( self ) -> List[str]:
_a = copy.deepcopy(self.__dict__ )
_a = self.vision_config.to_dict()
_a = self.__class__.model_type
return output
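# --- Illustrative sketch (toy classes; not the library implementation) ---
# The serialization pattern above: a composite config deep-copies its own
# attributes and replaces the nested sub-config object with that sub-config's
# own dict, so the result is plain data and JSON-serializable.
def _demo_nested_to_dict():
    import copy
    class _Vision:
        def to_dict(self):
            return {"model_type": "toy_vision", "patch_size": 16}
    class _Composite:
        model_type = "toy"
        def __init__(self):
            self.vision_config = _Vision()
            self.vocab_size = 100
        def to_dict(self):
            output = copy.deepcopy(self.__dict__)
            output["vision_config"] = self.vision_config.to_dict()
            output["model_type"] = self.__class__.model_type
            return output
    return _Composite().to_dict()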
| 104
| 1
|
'''simple docstring'''
def __lowerCamelCase ( _lowercase ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for data in source_data:
for i, el in enumerate(_A ):
if len(_A ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_A ) )
return data_lists
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Dict = []
for dlist, weight in zip(_A , _A ):
UpperCAmelCase : List[str] = min(_A )
UpperCAmelCase : List[str] = max(_A )
UpperCAmelCase : Optional[int] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
UpperCAmelCase : Tuple = F'''Invalid weight of {weight:f} provided'''
raise ValueError(_A )
score_lists.append(_A )
return score_lists
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Optional[Any] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_A ):
UpperCAmelCase : Union[str, Any] = final_scores[j] + ele
return final_scores
def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : int = get_data(_A )
UpperCAmelCase : Tuple = calculate_each_score(_A , _A )
UpperCAmelCase : Optional[int] = generate_final_scores(_A )
# append scores to source data
for i, ele in enumerate(_A ):
source_data[i].append(_A )
return source_data
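# --- Illustrative note ---
# Normalization above: weight 1 maps each value to (x - min) / (max - min), so
# higher raw values score higher; weight 0 maps to 1 - (x - min) / (max - min),
# rewarding lower raw values. E.g. for dlist = [20, 60, 100], weight 1 gives
# scores [0.0, 0.5, 1.0] and weight 0 gives [1.0, 0.5, 0.0].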
| 265
|
def UpperCAmelCase_ ( input_str ):
    '''simple docstring'''
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
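# --- Illustrative note ---
# Example of the word reversal above:
# " ".join("I hate it when you".split()[::-1]) -> "you when it hate I"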
| 314
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _a ( unittest.TestCase ):
def __init__( self : Union[str, Any], lowerCAmelCase__ : Optional[int], lowerCAmelCase__ : str=7, lowerCAmelCase__ : List[Any]=3, lowerCAmelCase__ : int=1_8, lowerCAmelCase__ : Union[str, Any]=3_0, lowerCAmelCase__ : Optional[Any]=4_0_0, lowerCAmelCase__ : int=True, lowerCAmelCase__ : List[str]=3_2, lowerCAmelCase__ : Optional[int]=True, ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : List[str] = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : int = image_size
_UpperCamelCase : Dict = min_resolution
_UpperCamelCase : Any = max_resolution
_UpperCamelCase : Optional[Any] = do_resize
_UpperCamelCase : Dict = size_divisor
_UpperCamelCase : List[str] = do_rescale
def snake_case ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class _a ( _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = GLPNImageProcessor if is_vision_available() else None
def snake_case ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase : Dict = GLPNImageProcessingTester(self )
@property
def snake_case ( self : Dict ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''size_divisor''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''resample''' ) )
self.assertTrue(hasattr(lowerCAmelCase__, '''do_rescale''' ) )
def snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
_UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCamelCase : Tuple = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def snake_case ( self : str ) -> Any:
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCamelCase : Optional[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 360
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 128
| 0
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ =get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, 'r', encoding='utf-8') as f:
lowercase__ =json.load(f)
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Union[str, Any] ):
return FSMTTokenizer.from_pretrained(__UpperCAmelCase )
def lowerCAmelCase (self : List[Any] , snake_case_ : str ):
__a : List[Any] = FSMTForConditionalGeneration.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 2_6.0],
['''ru-en''', 2_2.0],
['''en-de''', 2_2.0],
['''de-en''', 2_9.0],
] )
@slow
def lowerCAmelCase (self : Optional[int] , snake_case_ : Any , snake_case_ : List[Any] ):
__a : int = f"facebook/wmt19-{pair}"
__a : int = self.get_tokenizer(__UpperCAmelCase )
__a : Optional[int] = self.get_model(__UpperCAmelCase )
__a : int = bleu_data[pair]['''src''']
__a : List[str] = bleu_data[pair]['''tgt''']
__a : List[Any] = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , truncation=__UpperCAmelCase , padding='''longest''' ).to(__UpperCAmelCase )
__a : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__a : Any = tokenizer.batch_decode(
__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
__a : str = calculate_bleu(__UpperCAmelCase , __UpperCAmelCase )
print(__UpperCAmelCase )
self.assertGreaterEqual(scores['''bleu'''] , __UpperCAmelCase )
| 216
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8' )
    ciphertext = [int(number) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
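    # Brute force is cheap here: the key space is only 26**3 = 17,576 candidates.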
| 316
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
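# Converts a T5X SwitchTransformers checkpoint into sharded PyTorch `.bin` files,
# reading one tensorstore-backed weight at a time so the full model never has to fit in memory.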
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a single Flax parameter key/tensor pair to the PyTorch naming scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    # split the raw tensorstore key into the real layer name plus the metadata/kvstore suffix
    if "metadata" in layer:
        split_layer = layer.split('metadata' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore' )
        curr_real_layer_name = ''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
    else:
        split_layer = layer.split('/' )
        curr_real_layer_name = '/'.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('/', '.')] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '/checkpoint', 'rb' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info, sep='/' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('/' ) ), raw_weights )
        key = '/'.join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', F"""-{len(sharded_state_dicts)+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block, save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', F"""-{len(sharded_state_dicts)+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block, save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '.bin', F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename, os.path.join(dump_path, shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME ), 'w', encoding='utf-8' ) as f:
        content = json.dumps(index, indent=2, sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto' )
    tokenizer = T5Tokenizer.from_pretrained('t5-small' )
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text, return_tensors='pt' ).input_ids
    out = model.generate(input_ids, decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 355
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 103
| 0
|
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
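# NOTE: keep the `__all__` list above in sync with the names imported here.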
| 152
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor, tokenizer )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs )
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs )

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs )

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
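# Minimal usage sketch (the checkpoint id is illustrative, not taken from this file):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")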
| 152
| 1
|
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
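# Arrow keys carry ARROW_KEY_FLAG so they can be told apart from the plain byte codes 65-68.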
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
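    # Windows reports extended keys as two-byte sequences prefixed with \x00 or \xe0.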
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["esc"] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
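# Example: `key = get_character()` yields a printable character, an arrow-key code
# (offset by ARROW_KEY_FLAG), or KEYMAP["undefined"] for anything unrecognised.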
| 368
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, inputs_dict )

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff, 1E-5 )
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.""" )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device ), atol=1E-4 ) )
| 89
| 0
|
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img, pt1, pt2, rows, cols):
    """Warp the image with the affine transform defined by two point triples."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    # NOTE: the original pairings of these point sets were lost in this copy;
    # the (pts1, pts2), (pts2, pts3), (pts2, pts4) pairings below are a reconstruction.
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
        plt.title(titles[i])
        plt.axis('''off''')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
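    # An affine transform is fully determined by three point correspondences,
    # which is why each pts array holds exactly three (x, y) points.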
| 104
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
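# At import time the module is swapped for a _LazyModule, so the heavy torch-backed
# submodules are only loaded when one of the exported names is first accessed.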
| 104
| 1
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1 ), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1 ), nn.Sigmoid(), )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act ), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act ), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None ), )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act ), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act ), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4 ) ) ), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None ), )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels ) for _ in range(depth - 1 )], )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state )
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:] ):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth ) )

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states )


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight, mode='''fan_out''', nonlinearity='''relu''' )
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight, 1 )
            nn.init.constant_(module.bias, 0 )

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''', REGNET_START_DOCSTRING, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='''vision''', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )


@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', REGNET_START_DOCSTRING, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels ) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze() )
                else:
                    loss = loss_fct(logits, labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states )
| 364
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCamelCase = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
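# Each *_MAPPING_NAMES OrderedDict above is resolved lazily into concrete classes by _LazyAutoMapping below.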
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
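A brief usage sketch for the auto classes defined above; "t5-base" is the example checkpoint the module itself names for the seq2seq head, and Hub access is assumed:
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")
inputs = tokenizer("translate English to German: Hello, world!", return_tensors="np")
# The auto class dispatches on the checkpoint's config, so this is a
# FlaxT5ForConditionalGeneration instance under the hood.
sequences = model.generate(inputs["input_ids"]).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))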
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
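A quick check of the language-code handling above, assuming the "facebook/mbart-large-en-ro" checkpoint is reachable:
from transformers import MBartTokenizerFast
tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
ids = tok("Hello, world!")["input_ids"]
# Per set_src_lang_special_tokens: no prefix tokens, suffix is [eos, src_lang_code].
assert ids[-2] == tok.eos_token_id
assert ids[-1] == tok.convert_tokens_to_ids("en_XX")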
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
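The shard-size arithmetic the first and last tests rely on can be sketched standalone; expected_num_partitions is a hypothetical helper written here for illustration, not part of `datasets`:
def expected_num_partitions(num_rows: int, row_size_bytes: int, max_shard_size: int) -> int:
    # Rows per shard is floor(max_shard_size / row_size), at least 1;
    # the partition count is capped at the number of rows.
    rows_per_shard = max(1, max_shard_size // row_size_bytes)
    return min(num_rows, -(-num_rows // rows_per_shard))  # ceil division
assert expected_num_partitions(100, 8, 16) == 50  # matches test_repartition_df_if_needed
assert expected_num_partitions(100, 8, 1) == 100  # matches the max-partitioning test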
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()
    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
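The spatial bookkeeping in create_and_check_model is the standard convolution output-size formula applied once per Cvt stage; isolated here for clarity:
from math import floor
def conv_out_size(size: int, padding: int, kernel: int, stride: int) -> int:
    # floor((size + 2 * padding - kernel) / stride) + 1, as computed per stage above
    return floor((size + 2 * padding - kernel) / stride) + 1
size = 64  # tester default image_size
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_out_size(size, padding, kernel, stride)
assert size == 4  # expected height/width of the final hidden state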
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
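A minimal sketch of composing a RagConfig from two sub-configs via the classmethod above; the DPR/BART pairing is the usual choice but is only illustrative here:
from transformers import BartConfig, DPRConfig
rag_config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
)
# to_dict() re-nests the sub-configs, as implemented above.
d = rag_config.to_dict()
assert "question_encoder" in d and "generator" in d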
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
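A standalone sketch of the shape contract the tester checks for the base model, using the tester defaults above and randomly initialized weights:
import torch
from transformers import FlaubertConfig, FlaubertModel
config = FlaubertConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4, n_langs=2)
model = FlaubertModel(config).eval()
input_ids = torch.randint(0, 99, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    out = model(input_ids).last_hidden_state
assert out.shape == (13, 7, 32)  # (batch_size, seq_length, hidden_size)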
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
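The replicate/shard data-parallel pattern used above, in isolation (shapes are illustrative):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
n = jax.device_count()
batch = jnp.zeros((n * 2, 4))  # global batch whose leading dim is divisible by n
sharded = shard(batch)  # reshaped to (n, 2, 4): one slice per device
rngs = jax.random.split(jax.random.PRNGKey(0), n)  # one RNG key per device
params = replicate({"w": jnp.ones((4, 4))})  # pytrees are copied to every device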
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is opened and closed in the right order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
    main()
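A few quick checks of is_balanced:
assert is_balanced("([]{})")
assert not is_balanced("([)]")  # crossed pair fails the open_to_closed lookup
assert not is_balanced("(")  # unclosed opener leaves the stack non-empty
assert is_balanced("no brackets here")  # non-bracket characters are ignored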
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
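A quick illustration of token2json on a Donut-style sequence; `processor` is assumed to be an instance of the class above, and the tag names are invented for the example:
seq = "<s_menu><s_item>latte</s_item><sep/><s_item>tea</s_item></s_menu>"
parsed = processor.token2json(seq, added_vocab=[])  # empty added_vocab skips the tokenizer lookup
assert parsed == {"menu": [{"item": "latte"}, {"item": "tea"}]}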
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
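A minimal usage sketch for the re-exported pipeline; the checkpoint name and call arguments follow common diffusers usage and are illustrative rather than guaranteed by this module:
import torch
from diffusers import ShapEPipeline
pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images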
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format_ = image.format
    else:
        format_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format_)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 89
| 0
|
import os
def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum through the matrix from its left column to its
    right column, moving right, up, and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"{solution() = }")
| 167
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 167
| 1
|
'''simple docstring'''
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]
def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform a depth-first traversal of the graph, appending each vertex after its children."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
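# Worked example (my own trace of the run above): visiting from 'a' first
# exhausts 'c', then 'b' with its children 'd' and 'e', so the script prints
# ['c', 'd', 'e', 'b', 'a'].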
| 331
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Returns the list of string rotations of `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of `s` and the index of its original rotation."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform and returns the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or"
            " castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase_ = "Provide a string that I will generate its BWT transform: "
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
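# Quick round-trip check (my own worked example): sorting the six rotations of
# "banana" puts the original string at index 3 and the last column spells "nnbaaa".
# >>> bwt_transform("banana")
# {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
# >>> reverse_bwt("nnbaaa", 3)
# 'banana'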
| 211
| 0
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation.")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 357
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number: int) -> int:
    """Returns the next number of the chain by adding the square of each digit to the previous number."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True   # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89
def chain(number: int) -> bool:
    """Returns True if the chain of `number` ends with 1, False if it ends with 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000) -> int:
    """Returns the number of starting numbers below `number` whose chains arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ])
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}])
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ])
    @require_tf
    def test_small_model_tf(self):
        pass
| 26
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 75
| 0
|
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law to two given electrical values and return the missing one in a dict."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
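# Usage examples (my own; exactly one argument must be zero, and the function
# returns the solved-for quantity):
# >>> ohms_law(voltage=10, current=5, resistance=0)
# {'resistance': 2.0}
# >>> ohms_law(voltage=0, current=2, resistance=3)
# {'voltage': 6.0}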
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
]
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]) -> None:
        """
        :param claim_vector: the amount of each resource (e.g. memory, interface,
            semaphores, etc.) available in total.
        :param allocated_resources_table: the amount of each resource each process
            is currently holding.
        :param maximum_claim_table: the maximum amount of each resource each
            process may claim.
        """
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources for each resource in the claim vector."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self) -> list[int]:
        """Compute the resources still available, in line with the claim vector."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self) -> list[list[int]]:
        """Calculate each process's remaining need by subtracting its allocation from its maximum claim."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        """Build an index control dict to track the original index of each process during removals."""
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm with the stored tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align and display the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
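# A minimal usage sketch (my own illustration) with the test vectors defined
# above; passing describe=True also prints the pretty-formatted resource tables:
# BankersAlgorithm(
#     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
# ).main(describe=True)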
| 0
| 1
|
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
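# Quick checks (my own examples):
# >>> is_balanced("{[()]}")
# True
# >>> is_balanced("([)]")  # interleaved pairs are rejected
# False
# >>> is_balanced("(()")   # an unclosed bracket leaves the stack non-empty
# False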
| 6
|
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 6
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Construct a MGP-STR char tokenizer, which tokenizes a string into single characters."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string into a sequence of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 17
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
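# Shape intuition (my own note, not from the original script): the fused `kv`
# matrix has shape (2 * hidden_size, hidden_size); the first hidden_size rows
# become `key.weight` and the remaining rows become `value.weight`.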
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original model's weights into our GLPN structure."""
    # load GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 17
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 1_00
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=1_00, output_type="np")
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image, expected_image)
| 167
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=2_24, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ))
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ))
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ))
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ))
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ))
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ))
| 167
| 1
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Helper function for reproducible behavior to set the seed in random, numpy, and torch."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """
    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False)
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]
        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
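    # Worked example (my own illustration): with the defaults inv_gamma=1.0 and
    # power=2/3, warmup gives get_decay(1001) = 1 - 1001 ** (-2 / 3) ~= 0.990,
    # after which the value keeps rising until it is clipped at `decay`.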
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False)
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
def lowercase ( self : List[str] , _lowerCamelCase : Iterable[torch.nn.Parameter] ):
_snake_case = list(_a )
for s_param, param in zip(self.shadow_params , _a ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase ( self : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : List[str]=None ):
_snake_case = [
p.to(device=_a , dtype=_a ) if p.is_floating_point() else p.to(device=_a )
for p in self.shadow_params
]
def lowercase ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase ( self : Optional[int] , _lowerCamelCase : Iterable[torch.nn.Parameter] ):
_snake_case = [param.detach().cpu().clone() for param in parameters]
def lowercase ( self : List[str] , _lowerCamelCase : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , _a ):
param.data.copy_(c_param.data )
# Better memory-wise.
_snake_case = None
def lowercase ( self : int , _lowerCamelCase : dict ):
_snake_case = copy.deepcopy(_a )
_snake_case = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
_snake_case = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , _a ):
raise ValueError('''Invalid min_decay''' )
_snake_case = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , _a ):
raise ValueError('''Invalid optimization_step''' )
_snake_case = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , _a ):
raise ValueError('''Invalid update_after_step''' )
_snake_case = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _a ):
raise ValueError('''Invalid use_ema_warmup''' )
_snake_case = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
_snake_case = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
_snake_case = state_dict.get('''shadow_params''' , _a )
if shadow_params is not None:
_snake_case = shadow_params
if not isinstance(self.shadow_params , _a ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(_a , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
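# --- Usage sketch ----------------------------------------------------------------
# A hypothetical training-loop integration of the EMA helper above (the class is
# assumed to be importable as `EMAModel`, as in diffusers; `unet`, `optimizer`,
# `dataloader` and `train_step`/`evaluate` are stand-ins):
#
#   ema = EMAModel(unet.parameters(), decay=0.9999, use_ema_warmup=True)
#   for batch in dataloader:
#       train_step(unet, batch)
#       optimizer.step()
#       ema.step(unet.parameters())       # update shadow weights after each step
#   ema.store(unet.parameters())          # stash the live weights
#   ema.copy_to(unet.parameters())        # evaluate with the averaged weights
#   evaluate(unet)
#   ema.restore(unet.parameters())        # put the live weights back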
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
# Return True if there is node that has not iterated.
_snake_case = [False] * len(__lowerCamelCase )
_snake_case = []
queue.append(__lowerCamelCase )
_snake_case = True
while queue:
_snake_case = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
_snake_case = True
_snake_case = u
return visited[t]
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ) -> Dict:
# This array is filled by BFS and to store path
_snake_case = [-1] * (len(__lowerCamelCase ))
_snake_case = 0
while bfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
_snake_case = float('''Inf''' )
_snake_case = sink
while s != source:
# Find the minimum value in select path
_snake_case = min(__lowerCamelCase , graph[parent[s]][s] )
_snake_case = parent[s]
max_flow += path_flow
_snake_case = sink
while v != source:
_snake_case = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case = parent[v]
return max_flow
UpperCAmelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCAmelCase__ , UpperCAmelCase__ = 0, 5
print(ford_fulkerson(graph, source, sink))
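# Sanity check: this adjacency matrix is the classic CLRS flow-network example,
# so the script above should print a maximum flow of 23.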
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
    'feature_extraction_whisper': ['WhisperFeatureExtractor'],
    'processing_whisper': ['WhisperProcessor'],
    'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
        'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WhisperForConditionalGeneration',
        'WhisperModel',
        'WhisperPreTrainedModel',
        'WhisperForAudioClassification',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
        'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFWhisperForConditionalGeneration',
        'TFWhisperModel',
        'TFWhisperPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
        'FlaxWhisperForConditionalGeneration',
        'FlaxWhisperModel',
        'FlaxWhisperPreTrainedModel',
        'FlaxWhisperForAudioClassification',
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Create a peaking EQ biquad filter with `gain_db` of boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
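# Usage sketch (assumes audio_filters.iir_filter.IIRFilter exposes a per-sample
# `process` method, as in TheAlgorithms reference implementation):
#
#   filt = make_lowpass(440, 48_000)
#   impulse = [1.0] + [0.0] * 99
#   response = [filt.process(sample) for sample in impulse]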
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'])[0]].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys)}. Expected only missing embeddings.position_ids')
    if not (all(key.startswith('entity_predictions') or key.startswith('lm_head') for key in unexpected_keys)):
        raise ValueError(
            'Unexpected keys'
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}')
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors='pt')
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1_024))
        expected_slice = torch.tensor(
            [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_024))
        expected_slice = torch.tensor([[0.0_466, -0.0_106, -0.0_179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1_457, 0.1_044, 0.0_174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, 'r', encoding='utf-8') as f:
        for index, line in enumerate(f):
            entity, _ = line.rstrip().split('\t')
            entity_vocab[entity] = index
    return entity_vocab
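# Format note (illustrative, not taken from the source): each line of
# entity_vocab.tsv is assumed to hold two tab-separated fields, e.g. "[MASK]\t12";
# only the entity name is kept, and its line index becomes the entity id.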
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R): pass 0 for exactly one quantity and the
    missing value is returned.

    >>> ohms_law(voltage=10, current=0, resistance=5)
    {'current': 2.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
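# Usage sketch: solve for whichever quantity is passed as 0.
# ohms_law(voltage=0, current=2, resistance=5)   -> {'voltage': 10.0}
# ohms_law(voltage=12, current=0, resistance=4)  -> {'current': 3.0}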
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Deadlock avoidance via the Banker's algorithm: only execute processes
    whose remaining maximum claims can be satisfied by the free resources."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Per resource, the total currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: total claim minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per process, the resources still needed to reach its maximum claim."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need-row's original process index to the row itself."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self) -> None:
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
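    # Demo run (illustrative): exercise the algorithm on the tables defined above;
    # passing `describe=True` prints the allocation tables before scheduling.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)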
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Memoized recursive factorial.

    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
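    # lru_cache memoizes every intermediate result, so repeated calls are served
    # from the cache; a quick check of the implementation above:
    assert factorial(20) == 2432902008176640000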
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
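# Quick usage sketch: instantiate the config above with defaults and override a field.
# config = MegatronBertConfig(num_hidden_layers=12)
# print(config.model_type, config.hidden_size)  # -> megatron-bert 1024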
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR (scene-text recognition)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization: split the string into single characters
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
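# Usage sketch (assumes a local vocab.json that maps single characters to ids):
#   tok = MgpstrTokenizer("vocab.json")
#   tok._tokenize("abc")                        # -> ['a', 'b', 'c']
#   tok.convert_tokens_to_ids(['a', 'b', 'c'])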
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires it; the conversion never trains
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str =logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # keep only leaf modules (plus convs / batch norms, which carry parameters)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True
    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by running a forward
        pass with `x` on both modules and matching the traced operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
class FakeRegNetVisslWrapper(nn.Module):
    """Wrap the vissl trunk so its outputs match `get_trunk_forward_outputs`."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f'Unexpected layer name {k}'
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f'res{block_index}', v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    """Falls back to timm for model names that are not explicitly registered."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x):
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}')
    return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub=True):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, )
        print(f'Pushed {name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
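# Illustrative invocation (the script filename here is an assumption):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted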
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6])
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up VRAM between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""")
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = """a hat"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["""heb-eng"""])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("""opus-mt-he-en""", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.metric_id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 40
| 0
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = get_activation('swish')
self.assertIsInstance(_UpperCAmelCase , nn.SiLU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = get_activation('silu')
self.assertIsInstance(_UpperCAmelCase , nn.SiLU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = get_activation('mish')
self.assertIsInstance(_UpperCAmelCase , nn.Mish)
self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = get_activation('gelu')
self.assertIsInstance(_UpperCAmelCase , nn.GELU)
self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32)).item() , 0)
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
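# For reference, a quick numeric sketch (standard PyTorch only, `torch` is already
# imported above) of why the assertions hold: silu(x) = x * sigmoid(x), which
# vanishes for large negative x, is exactly 0 at x = 0, and approaches x for large
# positive x.
x = torch.tensor([-100.0, 0.0, 20.0])
print(x * torch.sigmoid(x))  # approximately [0., 0., 20.]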
| 367
|
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( __snake_case : list[int | str] ) -> None:
create_state_space_tree(__snake_case , [] , 0 , [0 for i in range(len(__snake_case ) )] )
def _lowerCAmelCase ( __snake_case : list[int | str] , __snake_case : list[int | str] , __snake_case : int , __snake_case : list[int] , ) -> None:
if index == len(__snake_case ):
print(__snake_case )
return
for i in range(len(__snake_case ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
__A : Any = True
create_state_space_tree(__snake_case , __snake_case , index + 1 , __snake_case )
current_sequence.pop()
__A : Any = False
lowercase__ : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowercase__ : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
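# Cross-check sketch (standard library only): the backtracking above prints one
# permutation per line, so the 4-element run should produce 4! = 24 lines.
from itertools import permutations
print(len(list(permutations([3, 1, 2, 4]))))  # 24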
| 190
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''pixel_values''']
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PIL.Image.BICUBIC ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
__lowerCamelCase : int = get_size_dict(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : Optional[int] = do_resize
__lowerCamelCase : List[Any] = size
__lowerCamelCase : Any = resample
__lowerCamelCase : Optional[int] = do_center_crop
__lowerCamelCase : Any = crop_size
__lowerCamelCase : Optional[int] = do_rescale
__lowerCamelCase : int = rescale_factor
__lowerCamelCase : str = do_normalize
__lowerCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PIL.Image.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Any ,):
__lowerCamelCase : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return resize(
SCREAMING_SNAKE_CASE__ ,size=(size['height'], size['width']) ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : str ,):
__lowerCamelCase : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(SCREAMING_SNAKE_CASE__ ,size=(size['height'], size['width']) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : str ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Dict ,):
__lowerCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : int = resample if resample is not None else self.resample
__lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : int = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : Any = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : str = image_std if image_std is not None else self.image_std
__lowerCamelCase : Union[str, Any] = size if size is not None else self.size
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__)
if not valid_images(SCREAMING_SNAKE_CASE__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
__lowerCamelCase : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images]
if do_resize:
__lowerCamelCase : Any = [self.resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__) for image in images]
if do_center_crop:
__lowerCamelCase : Dict = [self.center_crop(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__) for image in images]
if do_rescale:
__lowerCamelCase : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__) for image in images]
if do_normalize:
__lowerCamelCase : Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Optional[int] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__)
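# Illustrative sketch (numpy only, hypothetical input) of the rescale and normalize
# steps applied above; IMAGENET_STANDARD_MEAN and IMAGENET_STANDARD_STD are both
# [0.5, 0.5, 0.5]:
# >>> import numpy as np
# >>> img = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
# >>> img = img.astype(np.float32) * (1 / 255)  # rescale to [0, 1]
# >>> img = (img - 0.5) / 0.5                   # normalize to [-1, 1]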
| 73
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
if isinstance(__snake_case, torch.Tensor ):
return image
elif isinstance(__snake_case, PIL.Image.Image ):
_UpperCamelCase = [image]
if isinstance(image[0], PIL.Image.Image ):
_UpperCamelCase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
_UpperCamelCase = np.concatenate(__snake_case, axis=0 )
_UpperCamelCase = np.array(__snake_case ).astype(np.float32 ) / 255.0
_UpperCamelCase = image.transpose(0, 3, 1, 2 )
_UpperCamelCase = 2.0 * image - 1.0
_UpperCamelCase = torch.from_numpy(__snake_case )
elif isinstance(image[0], torch.Tensor ):
_UpperCamelCase = torch.cat(__snake_case, dim=0 )
return image
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=0.9995 ) -> List[Any]:
"""simple docstring"""
if not isinstance(__snake_case, np.ndarray ):
_UpperCamelCase = True
_UpperCamelCase = va.device
_UpperCamelCase = va.cpu().numpy()
_UpperCamelCase = va.cpu().numpy()
_UpperCamelCase = np.sum(va * va / (np.linalg.norm(__snake_case ) * np.linalg.norm(__snake_case )) )
if np.abs(__snake_case ) > DOT_THRESHOLD:
_UpperCamelCase = (1 - t) * va + t * va
else:
_UpperCamelCase = np.arccos(__snake_case )
_UpperCamelCase = np.sin(__snake_case )
_UpperCamelCase = theta_a * t
_UpperCamelCase = np.sin(__snake_case )
_UpperCamelCase = np.sin(theta_a - theta_t ) / sin_theta_a
_UpperCamelCase = sin_theta_t / sin_theta_a
_UpperCamelCase = sa * va + sa * va
if inputs_are_torch:
_UpperCamelCase = torch.from_numpy(__snake_case ).to(__snake_case )
return va
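# Quick numeric check of the slerp above (illustrative only): interpolating
# half-way between orthogonal unit vectors lands on the 45-degree unit vector,
# i.e. for va = [1, 0], vb = [0, 1] and t = 0.5, theta = pi/2 and the result is
# [sin(pi/4)/sin(pi/2)] * va + [sin(pi/4)/sin(pi/2)] * vb = [0.7071..., 0.7071...].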
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = F.normalize(__snake_case, dim=-1 )
_UpperCamelCase = F.normalize(__snake_case, dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
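# Note: for unit vectors x, y separated by an angle theta, ||x - y|| / 2 = sin(theta / 2),
# so the expression above equals theta ** 2 / 2 (half the squared geodesic distance
# on the unit sphere).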
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
for param in model.parameters():
_UpperCamelCase = value
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a , __a , __a , __a , __a , __a , __a=None , __a=None , __a=None , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__a , text_encoder=__a , clip_model=__a , tokenizer=__a , unet=__a , scheduler=__a , feature_extractor=__a , coca_model=__a , coca_tokenizer=__a , coca_transform=__a , )
_UpperCamelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , __a)
else feature_extractor.size['''shortest_edge''']
)
_UpperCamelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
set_requires_grad(self.text_encoder , __a)
set_requires_grad(self.clip_model , __a)
def UpperCAmelCase ( self , __a = "auto") -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
set_requires_grad(self.vae , __a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
set_requires_grad(self.vae , __a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
set_requires_grad(self.unet , __a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
set_requires_grad(self.unet , __a)
def UpperCAmelCase ( self , __a , __a , __a) -> Any:
'''simple docstring'''
# get the original timestep using init_timestep
_UpperCamelCase = min(int(num_inference_steps * strength) , __a)
_UpperCamelCase = max(num_inference_steps - init_timestep , 0)
_UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=None) -> Tuple:
'''simple docstring'''
if not isinstance(__a , torch.Tensor):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__a)}''')
_UpperCamelCase = image.to(device=__a , dtype=__a)
if isinstance(__a , __a):
_UpperCamelCase = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(__a)
]
_UpperCamelCase = torch.cat(__a , dim=0)
else:
_UpperCamelCase = self.vae.encode(__a).latent_dist.sample(__a)
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase = 0.1_8215 * init_latents
_UpperCamelCase = init_latents.repeat_interleave(__a , dim=0)
_UpperCamelCase = randn_tensor(init_latents.shape , generator=__a , device=__a , dtype=__a)
# get latents
_UpperCamelCase = self.scheduler.add_noise(__a , __a , __a)
_UpperCamelCase = init_latents
return latents
def UpperCAmelCase ( self , __a) -> str:
'''simple docstring'''
_UpperCamelCase = self.coca_transform(__a).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_UpperCamelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
_UpperCamelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('''<end_of_text>''')[0].replace('''<start_of_text>''' , '''''').rstrip(''' .,''')
def UpperCAmelCase ( self , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.feature_extractor.preprocess(__a)
_UpperCamelCase = torch.from_numpy(clip_image_input['''pixel_values'''][0]).unsqueeze(0).to(self.device).half()
_UpperCamelCase = self.clip_model.get_image_features(__a)
_UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a)
_UpperCamelCase = image_embeddings_clip.repeat_interleave(__a , dim=0)
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = latents.detach().requires_grad_()
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
# predict the noise residual
_UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_UpperCamelCase = self.scheduler.alphas_cumprod[timestep]
_UpperCamelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCamelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_UpperCamelCase = torch.sqrt(__a)
_UpperCamelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.sigmas[index]
_UpperCamelCase = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase = 1 / 0.1_8215 * sample
_UpperCamelCase = self.vae.decode(__a).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = transforms.Resize(self.feature_extractor_size)(__a)
_UpperCamelCase = self.normalize(__a).to(latents.dtype)
_UpperCamelCase = self.clip_model.get_image_features(__a)
_UpperCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__a)
_UpperCamelCase = spherical_dist_loss(__a , __a).mean() * clip_guidance_scale
_UpperCamelCase = -torch.autograd.grad(__a , __a)[0]
if isinstance(self.scheduler , __a):
_UpperCamelCase = latents.detach() + grads * (sigma**2)
_UpperCamelCase = noise_pred_original
else:
_UpperCamelCase = noise_pred_original - torch.sqrt(__a) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , __a , __a , __a = None , __a = None , __a = 5_12 , __a = 5_12 , __a = 0.6 , __a = 50 , __a = 7.5 , __a = 1 , __a = 0.0 , __a = 1_00 , __a = None , __a = "pil" , __a = True , __a = 0.8 , __a = 0.1 , __a = 0.1 , ) -> Dict:
'''simple docstring'''
if isinstance(__a , __a) and len(__a) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__a)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(__a , torch.Generator) and batch_size > 1:
_UpperCamelCase = [generator] + [None] * (batch_size - 1)
_UpperCamelCase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_UpperCamelCase = [x[0] for x in coca_is_none if x[1]]
_UpperCamelCase = ''', '''.join(__a)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_UpperCamelCase = self.get_image_description(__a)
if style_prompt is None:
if len(__a):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_UpperCamelCase = self.get_image_description(__a)
# get prompt text embeddings for content and style
_UpperCamelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_UpperCamelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_UpperCamelCase = slerp(__a , __a , __a)
# duplicate text embeddings for each generation per prompt
_UpperCamelCase = text_embeddings.repeat_interleave(__a , dim=0)
# set timesteps
_UpperCamelCase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_UpperCamelCase = {}
if accepts_offset:
_UpperCamelCase = 1
self.scheduler.set_timesteps(__a , **__a)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)
_UpperCamelCase , _UpperCamelCase = self.get_timesteps(__a , __a , self.device)
_UpperCamelCase = timesteps[:1].repeat(__a)
# Preprocess image
_UpperCamelCase = preprocess(__a , __a , __a)
_UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a)
_UpperCamelCase = preprocess(__a , __a , __a)
_UpperCamelCase = self.prepare_latents(
__a , __a , __a , text_embeddings.dtype , self.device , __a)
_UpperCamelCase = slerp(__a , __a , __a)
if clip_guidance_scale > 0:
_UpperCamelCase = self.get_clip_image_embeddings(__a , __a)
_UpperCamelCase = self.get_clip_image_embeddings(__a , __a)
_UpperCamelCase = slerp(
__a , __a , __a)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase = content_text_input.input_ids.shape[-1]
_UpperCamelCase = self.tokenizer([''''''] , padding='''max_length''' , max_length=__a , return_tensors='''pt''')
_UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_UpperCamelCase = uncond_embeddings.repeat_interleave(__a , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCamelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_UpperCamelCase = torch.randn(__a , generator=__a , device='''cpu''' , dtype=__a).to(
self.device)
else:
_UpperCamelCase = torch.randn(__a , generator=__a , device=self.device , dtype=__a)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_UpperCamelCase = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
# check if the scheduler accepts generator
_UpperCamelCase = '''generator''' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_UpperCamelCase = generator
with self.progress_bar(total=__a):
for i, t in enumerate(__a):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
# predict the noise residual
_UpperCamelCase = self.unet(__a , __a , encoder_hidden_states=__a).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2)
_UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_UpperCamelCase = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_UpperCamelCase , _UpperCamelCase = self.cond_fn(
__a , __a , __a , __a , __a , __a , __a , )
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(__a , __a , __a , **__a).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_UpperCamelCase = 1 / 0.1_8215 * latents
_UpperCamelCase = self.vae.decode(__a).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__a)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a , nsfw_content_detected=__a)
| 194
| 0
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
def is_in_circle(__lowercase : float ,__lowercase : float ) -> bool:
A_ : Tuple = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
A_ : Dict = mean(
int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) )
for _ in range(__lowercase ) )
# The ratio of the area for circle to square is pi/4.
A_ : int = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def UpperCamelCase ( __lowercase : int ,__lowercase : Callable[[float], float] ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__lowercase ,__lowercase ) ) for _ in range(__lowercase ) ) * (max_value - min_value)
def UpperCamelCase ( __lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ):
'''simple docstring'''
def identity_function(__lowercase : float ) -> float:
return x
A_ : List[Any] = area_under_curve_estimator(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : str = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print('******************' )
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
def function_to_integrate(__lowercase : float ) -> float:
return sqrt(4.0 - x * x )
A_ : List[str] = area_under_curve_estimator(
__lowercase ,__lowercase ,0.0 ,2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print('******************' )
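# Worked note: the integrand sqrt(4 - x * x) traces a quarter circle of radius 2,
# whose area from 0 to 2 is pi, so the estimator above converges to pi with an
# error that shrinks roughly as 1 / sqrt(iterations).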
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368
|
_UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = input('Enter message: ' )
A_ : int = input('Enter key [alphanumeric]: ' )
A_ : Optional[Any] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
A_ : List[Any] = 'encrypt'
A_ : int = encrypt_message(__lowercase ,__lowercase )
elif mode.lower().startswith('d' ):
A_ : Optional[Any] = 'decrypt'
A_ : Dict = decrypt_message(__lowercase ,__lowercase )
print(f'''\n{mode.title()}ed message:''' )
print(__lowercase )
def UpperCamelCase ( __lowercase : str ,__lowercase : str ):
'''simple docstring'''
return translate_message(__lowercase ,__lowercase ,'encrypt' )
def UpperCamelCase ( __lowercase : str ,__lowercase : str ):
'''simple docstring'''
return translate_message(__lowercase ,__lowercase ,'decrypt' )
def UpperCamelCase ( __lowercase : str ,__lowercase : str ,__lowercase : str ):
'''simple docstring'''
A_ : Tuple = []
A_ : str = 0
A_ : Optional[int] = key.upper()
for symbol in message:
A_ : Optional[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowercase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowercase ):
A_ : str = 0
else:
translated.append(__lowercase )
return "".join(__lowercase )
if __name__ == "__main__":
main()
| 192
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 2_5_5 , _UpperCAmelCase=True , ) -> int:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__UpperCamelCase : Any = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
__UpperCamelCase : Any = parent
__UpperCamelCase : Optional[Any] = batch_size
__UpperCamelCase : List[Any] = num_channels
__UpperCamelCase : Union[str, Any] = min_resolution
__UpperCamelCase : Dict = max_resolution
__UpperCamelCase : List[str] = do_resize
__UpperCamelCase : Optional[int] = size
__UpperCamelCase : List[str] = do_normalize
__UpperCamelCase : str = image_mean
__UpperCamelCase : Dict = image_std
__UpperCamelCase : Dict = do_rescale
__UpperCamelCase : int = rescale_factor
__UpperCamelCase : int = do_pad
def a_ (self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> List[Any]:
if not batched:
__UpperCamelCase : str = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__UpperCamelCase , __UpperCamelCase : Dict = image.size
else:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__UpperCamelCase : Tuple = int(self.size["shortest_edge"] * h / w )
__UpperCamelCase : Optional[Any] = self.size["shortest_edge"]
elif w > h:
__UpperCamelCase : Optional[Any] = self.size["shortest_edge"]
__UpperCamelCase : str = int(self.size["shortest_edge"] * w / h )
else:
__UpperCamelCase : List[str] = self.size["shortest_edge"]
__UpperCamelCase : Tuple = self.size["shortest_edge"]
else:
__UpperCamelCase : Optional[Any] = []
for image in image_inputs:
__UpperCamelCase , __UpperCamelCase : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
__UpperCamelCase : str = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = DeformableDetrImageProcessor if is_vision_available() else None
def a_ (self ) -> List[str]:
__UpperCamelCase : Tuple = DeformableDetrImageProcessingTester(self )
@property
def a_ (self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a_ (self ) -> str:
__UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
__UpperCamelCase : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=_UpperCAmelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def a_ (self ) -> Optional[Any]:
pass
def a_ (self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase , __UpperCamelCase : str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ (self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : List[str] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ (self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : List[str] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a_ (self ) -> List[Any]:
# prepare image and target
__UpperCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
__UpperCamelCase : List[Any] = json.loads(f.read() )
__UpperCamelCase : Dict = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
__UpperCamelCase : List[str] = DeformableDetrImageProcessor()
__UpperCamelCase : Union[str, Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors="pt" )
# verify pixel values
__UpperCamelCase : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCAmelCase )
__UpperCamelCase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify area
__UpperCamelCase : Any = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCAmelCase ) )
# verify boxes
__UpperCamelCase : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCAmelCase )
__UpperCamelCase : Any = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__UpperCamelCase : Optional[Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCAmelCase ) )
# verify is_crowd
__UpperCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCAmelCase ) )
# verify class_labels
__UpperCamelCase : List[str] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCAmelCase ) )
# verify orig_size
__UpperCamelCase : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCAmelCase ) )
# verify size
__UpperCamelCase : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCAmelCase ) )
@slow
def a_ (self ) -> str:
# prepare image, target and masks_path
__UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
__UpperCamelCase : Dict = json.loads(f.read() )
__UpperCamelCase : Union[str, Any] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
__UpperCamelCase : str = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
__UpperCamelCase : List[Any] = DeformableDetrImageProcessor(format="coco_panoptic" )
__UpperCamelCase : List[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors="pt" )
# verify pixel values
__UpperCamelCase : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , _UpperCAmelCase )
__UpperCamelCase : Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify area
__UpperCamelCase : List[Any] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _UpperCAmelCase ) )
# verify boxes
__UpperCamelCase : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _UpperCAmelCase )
__UpperCamelCase : List[str] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _UpperCAmelCase , atol=1E-3 ) )
# verify image_id
__UpperCamelCase : Optional[Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _UpperCAmelCase ) )
# verify is_crowd
__UpperCamelCase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _UpperCAmelCase ) )
# verify class_labels
__UpperCamelCase : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _UpperCAmelCase ) )
# verify masks
__UpperCamelCase : Dict = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _UpperCAmelCase )
# verify orig_size
__UpperCamelCase : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _UpperCAmelCase ) )
# verify size
__UpperCamelCase : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _UpperCAmelCase ) )
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
__UpperCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
__UpperCamelCase : str = 1 - (matter_density + radiation_density + dark_energy)
__UpperCamelCase : List[Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
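# Worked check: at redshift 0 the bracket reduces to
# radiation_density + matter_density + curvature + dark_energy = 1 (by the
# definition of curvature above), so the function returns hubble_constant itself,
# as the demo below shows with 68.3.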
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 298
| 1
|
"""simple docstring"""
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 79
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a__ )
class __UpperCamelCase ( a__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase : str =field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCamelCase : ClassVar[Features] =Features({"""text""": Value("""string""" )} )
lowerCamelCase : ClassVar[Features] =Features({"""summary""": Value("""string""" )} )
lowerCamelCase : str ="text"
lowerCamelCase : str ="summary"
@property
def __a ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 79
| 1
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)])
def lowerCamelCase ( self : Optional[int] , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = GenerationConfig(
do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , config_name=_snake_case)
UpperCAmelCase_ = GenerationConfig.from_pretrained(_snake_case , config_name=_snake_case)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _snake_case)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50)
self.assertEqual(loaded_config.max_length , 20)
self.assertEqual(loaded_config.max_time , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = AutoConfig.from_pretrained('''gpt2''')
UpperCAmelCase_ = GenerationConfig.from_model_config(_snake_case)
UpperCAmelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_snake_case , _snake_case)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = GenerationConfig()
UpperCAmelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
UpperCAmelCase_ = copy.deepcopy(_snake_case)
UpperCAmelCase_ = generation_config.update(**_snake_case)
# update_kwargs was not modified (no side effects)
self.assertEqual(_snake_case , _snake_case)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_snake_case , {'''foo''': '''bar'''})
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = GenerationConfig()
UpperCAmelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''') as tmp_dir:
generation_config.save_pretrained(_snake_case)
UpperCAmelCase_ = GenerationConfig.from_pretrained(_snake_case)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''')
UpperCAmelCase_ = GenerationConfig.from_model_config(_snake_case)
assert not hasattr(_snake_case , '''foo''') # no new kwargs should be initialized if from config
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , _snake_case)
self.assertEqual(default_config.num_beams , 1)
UpperCAmelCase_ = GenerationConfig(
do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , _snake_case)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case)
UpperCAmelCase_ = GenerationConfig.from_pretrained(_snake_case , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , _snake_case)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Tuple):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : Tuple):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''')
except HTTPError:
pass
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = GenerationConfig(
do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token)
UpperCAmelCase_ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''test-generation-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = GenerationConfig(
do_sample=_snake_case , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
| 51
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ : Tuple = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
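# Example lookup: LANGUAGE_CODES["French"] == "fra_Latn". The tool below uses this
# mapping to turn plain-English language names into NLLB model codes.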
# Imports for the tool below (in the upstream transformers module these sit at the
# top of the file; the exact import path for PipelineTool may vary by version).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.tools.base import PipelineTool


class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
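# Usage sketch (assumes the transformers agents/tools runtime; model weights are
# downloaded on first use):
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")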
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
__lowercase = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
__lowercase = requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__lowercase = BeautifulSoup(res.text, '''html.parser''')
__lowercase = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
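# Usage sketch (file name is hypothetical; requires network access and the
# `fake_useragent` package):
#
#   python crawl_google_results.py "python web scraping"
#
# opens the top five result links in the default browser.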
"""simple docstring"""
def lowerCAmelCase (__UpperCamelCase : int = 3 , __UpperCamelCase : int = 7 , __UpperCamelCase : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
__UpperCamelCase =0
__UpperCamelCase =1
for current_denominator in range(1 , limit + 1 ):
__UpperCamelCase =current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__UpperCamelCase =current_numerator
__UpperCamelCase =current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
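# Sanity check (hand-verified): with denominators up to 8, the closest fraction
# below 3/7 is 2/5, so solution(3, 7, 8) == 2. With the default limit of
# 1_000_000 this is Project Euler problem 71, whose answer is 428570.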
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
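# Usage sketch (file names are hypothetical): this builder is what backs
# `load_dataset("json", ...)` in the `datasets` library.
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl")               # one object per line
#   ds = load_dataset("json", data_files="dump.json", field="data")  # records nested under a key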
'''simple docstring'''

from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
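# Note: the kernel above uses a 1/(2*pi*sigma) prefactor rather than the
# 1/(2*pi*sigma**2) of a normalized 2D Gaussian, so its weights do not in
# general sum to 1 and the output brightness shifts slightly. A normalized
# variant would divide the kernel by g.sum() before convolving.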
from math import factorial


def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid,
    i.e. the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger

logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Guard against path traversal during extraction (CVE-2007-4559).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
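# Usage sketch (paths are hypothetical): infer the archive format once, then
# extract with it; the format string is a key of Extractor.extractors.
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)
#
# ExtractManager wraps the same logic with a hash-addressed cache directory.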