code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase (__lowercase ):
def __init__( self : int , *__UpperCAmelCase : List[Any] , __UpperCAmelCase : Any=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : Any ) -> List[str]:
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = eval_examples
SCREAMING_SNAKE_CASE__ = post_process_function
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Optional[Dataset] = None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "eval" , **__UpperCAmelCase : Any , ) -> Dict[str, float]:
SCREAMING_SNAKE_CASE__ = gen_kwargs.copy()
SCREAMING_SNAKE_CASE__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
SCREAMING_SNAKE_CASE__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
SCREAMING_SNAKE_CASE__ = gen_kwargs
SCREAMING_SNAKE_CASE__ = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE__ = self.get_eval_dataloader(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE__ = self.compute_metrics
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = time.time()
SCREAMING_SNAKE_CASE__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE__ = eval_loop(
_lowerCAmelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
SCREAMING_SNAKE_CASE__ = compute_metrics
SCREAMING_SNAKE_CASE__ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE__ = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
SCREAMING_SNAKE_CASE__ = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCAmelCase )
return metrics
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str = "test" , **__UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ = gen_kwargs.copy()
SCREAMING_SNAKE_CASE__ = self.get_test_dataloader(_lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE__ = self.compute_metrics
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = time.time()
SCREAMING_SNAKE_CASE__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE__ = eval_loop(
_lowerCAmelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
SCREAMING_SNAKE_CASE__ = compute_metrics
SCREAMING_SNAKE_CASE__ = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE__ = self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , """predict""" )
SCREAMING_SNAKE_CASE__ = self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
SCREAMING_SNAKE_CASE__ = metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCAmelCase )
| 196
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowercase : Any =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowercase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowercase : List[str] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowercase : str =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowercase : Union[str, Any] =(
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowercase : str =(
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowercase : int =(
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =randrange(len(lowercase__ ) ), randrange(len(lowercase__ ) )
UpperCAmelCase_ =["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
UpperCAmelCase_ , UpperCAmelCase_ =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def a__ ( lowercase__ = 1_0_0 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(lowercase__ ))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand(lowercase__ ) for hand in SORTED_HANDS]
UpperCAmelCase_ =poker_hands.copy()
shuffle(lowercase__ )
UpperCAmelCase_ =chain(sorted(lowercase__ ) )
for index, hand in enumerate(lowercase__ ):
assert hand == poker_hands[index]
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=lowercase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand("2C 4S AS 3D 5C" )
UpperCAmelCase_ =True
UpperCAmelCase_ =[5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =0
UpperCAmelCase_ =os.path.abspath(os.path.dirname(lowercase__ ) )
UpperCAmelCase_ =os.path.join(lowercase__ , "poker_hands.txt" )
with open(lowercase__ ) as file_hand:
for line in file_hand:
UpperCAmelCase_ =line[:1_4].strip()
UpperCAmelCase_ =line[1_5:].strip()
UpperCAmelCase_ , UpperCAmelCase_ =PokerHand(lowercase__ ), PokerHand(lowercase__ )
UpperCAmelCase_ =player.compare_with(lowercase__ )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 54
| 0
|
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase : Dict = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase : Any ):
lowerCamelCase_ = r'''\w+[.]\d+'''
lowerCamelCase_ = re.findall(_lowerCamelCase , _lowerCamelCase )
for pat in pats:
lowerCamelCase_ = key.replace(_lowerCamelCase , '''_'''.join(pat.split('''.''' ) ) )
return key
def lowerCamelCase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ):
lowerCamelCase_ = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowerCamelCase_ = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowerCamelCase_ = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowerCamelCase_ = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowerCamelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowerCamelCase_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase_ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase_ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCamelCase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=4_2 ):
# Step 1: Convert pytorch tensor to numpy
lowerCamelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowerCamelCase_ = flax_model.init_weights(PRNGKey(_lowerCamelCase ) )
lowerCamelCase_ = flatten_dict(_lowerCamelCase )
lowerCamelCase_ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase_ = rename_key(_lowerCamelCase )
lowerCamelCase_ = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowerCamelCase_ , lowerCamelCase_ = rename_key_and_reshape_tensor(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowerCamelCase_ = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
| 66
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
lowerCamelCase_ = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(UpperCamelCase__ )
from datasets import load_dataset
lowerCamelCase_ = load_dataset('''nielsr/rvlcdip-demo''' )
lowerCamelCase_ = dataset['''train'''][0]['''image'''].convert('''RGB''' )
lowerCamelCase_ = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase__ )
lowerCamelCase_ = outputs.logits
lowerCamelCase_ = torch.Size((1, 16) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase_ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=UpperCamelCase__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 66
| 1
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , A_ : NestedDataStructureLike[PathLike] , A_ : Optional[NamedSplit] = None , A_ : Optional[Features] = None , A_ : str = None , A_ : bool = False , A_ : bool = False , A_ : Optional[str] = None , A_ : Optional[int] = None , **A_ : Optional[Any] , )-> str:
super().__init__(
A_ , split=A_ , features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
__UpperCamelCase = field
__UpperCamelCase = path_or_paths if isinstance(A_ , A_ ) else {self.split: path_or_paths}
__UpperCamelCase = Json(
cache_dir=A_ , data_files=A_ , features=A_ , field=A_ , **A_ , )
def A ( self : List[str] )-> str:
# Build iterable dataset
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=A_ , in_memory=self.keep_in_memory )
return dataset
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , A_ : Dataset , A_ : Union[PathLike, BinaryIO] , A_ : Optional[int] = None , A_ : Optional[int] = None , **A_ : Optional[Any] , )-> Any:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__UpperCamelCase = dataset
__UpperCamelCase = path_or_buf
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = "utf-8"
__UpperCamelCase = to_json_kwargs
def A ( self : Tuple )-> int:
__UpperCamelCase = self.to_json_kwargs.pop("path_or_buf" , A_ )
__UpperCamelCase = self.to_json_kwargs.pop("orient" , "records" )
__UpperCamelCase = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
__UpperCamelCase = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
__UpperCamelCase = self.to_json_kwargs.pop("compression" , A_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=A_ ) as buffer:
__UpperCamelCase = self._write(file_obj=A_ , orient=A_ , lines=A_ , index=A_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
__UpperCamelCase = self._write(
file_obj=self.path_or_buf , orient=A_ , lines=A_ , index=A_ , **self.to_json_kwargs )
return written
def A ( self : Union[str, Any] , A_ : str )-> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(A_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas().to_json(
path_or_buf=A_ , orient=A_ , lines=A_ , index=A_ , **A_ )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def A ( self : Dict , A_ : BinaryIO , A_ : Tuple , A_ : str , A_ : List[Any] , **A_ : Union[str, Any] , )-> int:
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
__UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(A_ )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A_ , A_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(A_ )
return written
| 505
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_A = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
_snake_case : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
_snake_case : Optional[str] = dataclasses.field(
default=snake_case__ , metadata={'help': 'The name of the task to train on.'} , )
_snake_case : Optional[List[str]] = dataclasses.field(
default=snake_case__ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __UpperCAmelCase :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
_snake_case : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
_snake_case : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
_snake_case : Optional[int] = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
_snake_case : Optional[bool] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
_snake_case : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_snake_case : Optional[int] = dataclasses.field(
default=snake_case__ , metadata={'help': 'Random seed for initialization.'} , )
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> str:
'''simple docstring'''
__UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 )
if args.do_filter_by_confidence:
__UpperCamelCase = dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__UpperCamelCase = int(eval_result * len(_snake_case ) )
print(_snake_case )
__UpperCamelCase = dataset.sort("probability" ,reverse=_snake_case )
__UpperCamelCase = dataset.select(range(_snake_case ) )
__UpperCamelCase = dataset.remove_columns(["label", "probability"] )
__UpperCamelCase = dataset.rename_column("prediction" ,"label" )
__UpperCamelCase = dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} )
__UpperCamelCase = dataset.shuffle(seed=args.seed )
__UpperCamelCase = os.path.join(_snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_snake_case ,index=_snake_case )
else:
dataset.to_json(_snake_case )
def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ) -> List[str]:
'''simple docstring'''
__UpperCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__UpperCamelCase = STModelArguments(model_name_or_path=_snake_case )
__UpperCamelCase = STDataArguments(train_file=_snake_case ,infer_file=_snake_case )
__UpperCamelCase = STTrainingArguments(output_dir=_snake_case )
__UpperCamelCase = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case ,_snake_case ,_snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case ,_snake_case ):
setattr(_snake_case ,_snake_case ,_snake_case )
# Sanity checks
__UpperCamelCase = {}
__UpperCamelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__UpperCamelCase = args.train_file
__UpperCamelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__UpperCamelCase = args.eval_file
for key in data_files:
__UpperCamelCase = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
__UpperCamelCase = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
__UpperCamelCase = f"""{args.output_dir}/self-train_iter-{{}}""".format
__UpperCamelCase = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir ,exist_ok=_snake_case )
os.makedirs(_snake_case ,exist_ok=_snake_case )
accelerator.wait_for_everyone()
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = False
# Show the progress bar
__UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 ,int(args.max_selftrain_iterations ) ):
__UpperCamelCase = data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__UpperCamelCase = os.path.join(_snake_case ,"stage-1" )
__UpperCamelCase = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case ,_snake_case ):
arguments_dict.update({key: value} )
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" ,_snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." ,_snake_case ,_snake_case ,)
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" ,_snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 1." ,_snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" )
__UpperCamelCase = os.path.join(_snake_case ,"stage-2" )
# Update arguments_dict
__UpperCamelCase = model_path
__UpperCamelCase = data_files["train"]
__UpperCamelCase = current_output_dir
__UpperCamelCase = os.path.join(_snake_case ,"best-checkpoint" ,_snake_case )
if os.path.exists(_snake_case ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." ,_snake_case ,_snake_case ,)
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" ,_snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info("Self-training job completed: iteration: %d, stage: 2." ,_snake_case )
__UpperCamelCase = iteration
__UpperCamelCase = data_dir_format(iteration + 1 )
__UpperCamelCase = AutoConfig.from_pretrained(os.path.join(_snake_case ,"best-checkpoint" ) )
__UpperCamelCase = config.idalabel
__UpperCamelCase = os.path.join(_snake_case ,"eval_results_best-checkpoint.json" )
__UpperCamelCase = os.path.join(_snake_case ,"test_results_best-checkpoint.json" )
assert os.path.exists(_snake_case )
with open(_snake_case ,"r" ) as f:
__UpperCamelCase = float(json.load(_snake_case )[args.eval_metric] )
__UpperCamelCase = os.path.join(_snake_case ,"infer_output_best-checkpoint.csv" )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__UpperCamelCase = load_dataset(args.data_file_extension ,data_files={"data": data_files["infer"]} )["data"]
__UpperCamelCase = load_dataset("csv" ,data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_snake_case ,exist_ok=_snake_case )
shutil.copy(_snake_case ,os.path.join(_snake_case ,f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case ,os.path.join(_snake_case ,f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
accelerator.wait_for_everyone()
__UpperCamelCase = os.path.join(_snake_case ,f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__UpperCamelCase = eval_result
if best_iteration is None:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
__UpperCamelCase = 0
else:
if new_eval_result == best_eval_result:
__UpperCamelCase = new_iteration
__UpperCamelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__UpperCamelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" ,_snake_case )
logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,_snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(_snake_case ,"eval_results_best-iteration.json" ) ,)
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" ,args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,_snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(_snake_case ,"eval_results_best-iteration.json" ) ,)
| 505
| 1
|
'''simple docstring'''
import re
import subprocess
import sys
lowerCAmelCase__ = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
lowerCAmelCase__ = (
subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode("utf-8").split()
)
lowerCAmelCase__ = "|".join(sys.argv[1:])
lowerCAmelCase__ = re.compile(rF'^({joined_dirs}).*?\.py$')
lowerCAmelCase__ = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 713
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase ( a_ ):
pass
class lowercase :
def __init__( self , _snake_case) -> None:
UpperCAmelCase_ : Any = data
UpperCAmelCase_ : Node | None = None
def __iter__( self) -> Optional[int]:
UpperCAmelCase_ : int = self
UpperCAmelCase_ : List[str] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_snake_case)
yield node.data
UpperCAmelCase_ : Tuple = node.next_node
@property
def _snake_case ( self) -> bool:
try:
list(self)
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowerCAmelCase__ = Node(1)
lowerCAmelCase__ = Node(2)
lowerCAmelCase__ = Node(3)
lowerCAmelCase__ = Node(4)
print(root_node.has_loop) # False
lowerCAmelCase__ = root_node.next_node
print(root_node.has_loop) # True
lowerCAmelCase__ = Node(5)
lowerCAmelCase__ = Node(6)
lowerCAmelCase__ = Node(5)
lowerCAmelCase__ = Node(6)
print(root_node.has_loop) # False
lowerCAmelCase__ = Node(1)
print(root_node.has_loop) # False
| 471
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase_ ( __a : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a_ = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def UpperCAmelCase_ ( __a : int ):
'''simple docstring'''
if not isinstance(__a , __a ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
_lowerCamelCase : Union[str, Any] = []
for num in range(len(__a ) ):
_lowerCamelCase : Tuple = 0
while 2 * i * i <= odd_composites[num]:
_lowerCamelCase : Union[str, Any] = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def UpperCAmelCase_ ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 437
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
a_ = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
a_ = re.compile(r"""([a-z\d])([A-Z])""")
a_ = re.compile(r"""(?<!_)_(?!_)""")
a_ = re.compile(r"""(_{2,})""")
a_ = r"""^\w+(\.\w+)*$"""
a_ = r"""<>:/\|?*"""
def UpperCAmelCase_ ( __a : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = _uppercase_uppercase_re.sub(r'\1_\2' , __a )
_lowerCamelCase : Tuple = _lowercase_uppercase_re.sub(r'\1_\2' , __a )
return name.lower()
def UpperCAmelCase_ ( __a : Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Dict = _single_underscore_re.split(__a )
_lowerCamelCase : Tuple = [_multiple_underscores_re.split(__a ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__a ) if n != '' )
def UpperCAmelCase_ ( __a : List[Any] ):
'''simple docstring'''
if os.path.basename(__a ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(__a )
def UpperCAmelCase_ ( __a : Union[str, Any] , __a : Optional[int] ):
'''simple docstring'''
if os.path.basename(__a ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , __a ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(__a )}-{split}"
def UpperCAmelCase_ ( __a : Any , __a : Union[str, Any] , __a : List[Any] , __a : List[str]=None ):
'''simple docstring'''
_lowerCamelCase : List[Any] = filename_prefix_for_split(__a , __a )
if filetype_suffix:
prefix += f".{filetype_suffix}"
_lowerCamelCase : List[str] = os.path.join(__a , __a )
return f"{filepath}*"
def UpperCAmelCase_ ( __a : str , __a : List[Any] , __a : List[str] , __a : Tuple=None , __a : Tuple=None ):
'''simple docstring'''
_lowerCamelCase : Tuple = filename_prefix_for_split(__a , __a )
_lowerCamelCase : List[str] = os.path.join(__a , __a )
if shard_lengths:
_lowerCamelCase : Union[str, Any] = len(__a )
_lowerCamelCase : str = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__a )]
if filetype_suffix:
_lowerCamelCase : int = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
_lowerCamelCase : int = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| 437
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a_ ( lowercase__ ):
UpperCamelCase__ : torch.FloatTensor
UpperCamelCase__ : torch.FloatTensor
class a_ ( lowercase__ , lowercase__ ):
UpperCamelCase__ : Optional[int] =1
@register_to_config
def __init__( self :Tuple , _lowercase :Tuple = 2000 , _lowercase :Any = 0.15 , _lowercase :List[str] = 0.01 , _lowercase :List[str] = 1_348.0 , _lowercase :Dict = 1E-5 , _lowercase :str = 1 , ) -> Union[str, Any]:
# standard deviation of the initial noise distribution
UpperCAmelCase_ = sigma_max
# setable values
UpperCAmelCase_ = None
self.set_sigmas(_lowercase , _lowercase , _lowercase , _lowercase)
def __a ( self :Any , _lowercase :int , _lowercase :Tuple = None) -> List[str]:
return sample
def __a ( self :Optional[Any] , _lowercase :Optional[Any] , _lowercase :str = None , _lowercase :List[Any] = None) -> Tuple:
UpperCAmelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCAmelCase_ = torch.linspace(1 , _lowercase , _lowercase , device=_lowercase)
def __a ( self :Optional[int] , _lowercase :Tuple , _lowercase :List[Any] = None , _lowercase :Optional[Any] = None , _lowercase :Tuple = None) -> Dict:
UpperCAmelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCAmelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCAmelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_lowercase , _lowercase)
UpperCAmelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCAmelCase_ = torch.exp(torch.linspace(math.log(_lowercase) , math.log(_lowercase) , _lowercase))
UpperCAmelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def __a ( self :List[str] , _lowercase :Dict , _lowercase :int) -> Optional[Any]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , )
def __a ( self :Dict , _lowercase :str , _lowercase :Union[str, Any] , _lowercase :Optional[int] , _lowercase :Dict = None , _lowercase :Optional[int] = True , ) -> Any:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
UpperCAmelCase_ = timestep * torch.ones(
sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCAmelCase_ = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCAmelCase_ = timesteps.to(self.discrete_sigmas.device)
UpperCAmelCase_ = self.discrete_sigmas[timesteps].to(sample.device)
UpperCAmelCase_ = self.get_adjacent_sigma(_lowercase , _lowercase).to(sample.device)
UpperCAmelCase_ = torch.zeros_like(_lowercase)
UpperCAmelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCAmelCase_ = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
UpperCAmelCase_ = diffusion.unsqueeze(-1)
UpperCAmelCase_ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCAmelCase_ = randn_tensor(
sample.shape , layout=sample.layout , generator=_lowercase , device=sample.device , dtype=sample.dtype)
UpperCAmelCase_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCAmelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_lowercase , prev_sample_mean=_lowercase)
def __a ( self :List[Any] , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :Optional[Any] = None , _lowercase :Union[str, Any] = True , ) -> Dict:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''')
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCAmelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_lowercase).to(sample.device)
# compute step size from the model_output, the noise, and the snr
UpperCAmelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean()
UpperCAmelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean()
UpperCAmelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCAmelCase_ = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCAmelCase_ = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
UpperCAmelCase_ = step_size.unsqueeze(-1)
UpperCAmelCase_ = sample + step_size * model_output
UpperCAmelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[int] , _lowercase :List[str] , _lowercase :Union[str, Any] , ) -> List[Any]:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = timesteps.to(original_samples.device)
UpperCAmelCase_ = self.discrete_sigmas.to(original_samples.device)[timesteps]
UpperCAmelCase_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_lowercase) * sigmas[:, None, None, None]
)
UpperCAmelCase_ = noise + original_samples
return noisy_samples
def __len__( self :int) -> Any:
return self.config.num_train_timesteps
| 701
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a_ ( _snake_case ):
UpperCamelCase__ : Union[str, Any] ="bert"
def __init__( self :Any , _lowercase :Optional[int]=30522 , _lowercase :str=768 , _lowercase :Union[str, Any]=12 , _lowercase :Dict=12 , _lowercase :Optional[Any]=3072 , _lowercase :List[Any]="gelu" , _lowercase :Dict=0.1 , _lowercase :Union[str, Any]=0.1 , _lowercase :Optional[int]=512 , _lowercase :List[str]=2 , _lowercase :List[str]=0.02 , _lowercase :Union[str, Any]=1E-1_2 , _lowercase :Dict=0 , _lowercase :List[str]="absolute" , _lowercase :Union[str, Any]=True , _lowercase :str=None , **_lowercase :Union[str, Any] , ) -> Dict:
super().__init__(pad_token_id=_lowercase , **_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class a_ ( _snake_case ):
@property
def __a ( self :str) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
| 561
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21
|
'''simple docstring'''
import random
def UpperCamelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
__lowerCAmelCase = num - 1
__lowerCAmelCase = 0
while s % 2 == 0:
__lowerCAmelCase = s // 2
t += 1
for _ in range(5 ):
__lowerCAmelCase = random.randrange(2 , num - 1 )
__lowerCAmelCase = pow(snake_case_ , snake_case_ , snake_case_ )
if v != 1:
__lowerCAmelCase = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__lowerCAmelCase = i + 1
__lowerCAmelCase = (v**2) % num
return True
def UpperCamelCase_ ( snake_case_ : int ) -> bool:
'''simple docstring'''
if num < 2:
return False
__lowerCAmelCase = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case_ )
def UpperCamelCase_ ( snake_case_ : int = 10_24 ) -> int:
'''simple docstring'''
while True:
__lowerCAmelCase = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(snake_case_ ):
return num
if __name__ == "__main__":
_A : List[Any] = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 427
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Any = tempfile.mkdtemp()
__UpperCAmelCase : Optional[Any] = BlipImageProcessor()
__UpperCAmelCase : int = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
__UpperCAmelCase : int = BlipProcessor(UpperCamelCase_ , UpperCamelCase_)
processor.save_pretrained(self.tmpdirname)
def a_ ( self : List[str] , **UpperCamelCase_ : int):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_).tokenizer
def a_ ( self : Optional[int] , **UpperCamelCase_ : List[Any]):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_).image_processor
def a_ ( self : Optional[Any]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : str = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCAmelCase : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
__UpperCAmelCase : Any = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0)
__UpperCAmelCase : List[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCamelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = BlipProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Dict = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = image_processor(UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Union[str, Any] = processor(images=UpperCamelCase_ , return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
__UpperCAmelCase : Dict = BlipProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = "lower newer"
__UpperCAmelCase : str = processor(text=UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : List[str] = "lower newer"
__UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
__UpperCAmelCase : int = processor(text=UpperCamelCase_ , images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["pixel_values", "input_ids", "attention_mask"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Dict = BlipProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : Union[str, Any] = processor.batch_decode(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : List[str] = "lower newer"
__UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
__UpperCAmelCase : Dict = processor(text=UpperCamelCase_ , images=UpperCamelCase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ["pixel_values", "input_ids", "attention_mask"])
| 487
|
"""simple docstring"""
from manim import *
class a__ ( __magic_name__ ):
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : List[str] = Rectangle(height=0.5 , width=0.5)
__UpperCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25)
__UpperCAmelCase : Dict = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
__UpperCAmelCase : List[str] = [mem.copy() for i in range(6)]
__UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6)]
__UpperCAmelCase : List[str] = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : List[Any] = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Optional[int] = VGroup(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Dict = Text("CPU" , font_size=24)
__UpperCAmelCase : int = Group(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(UpperCamelCase_)
__UpperCAmelCase : List[str] = [mem.copy() for i in range(4)]
__UpperCAmelCase : List[Any] = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Tuple = Text("GPU" , font_size=24)
__UpperCAmelCase : Tuple = Group(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_)
gpu.move_to([-1, -1, 0])
self.add(UpperCamelCase_)
__UpperCAmelCase : Any = [mem.copy() for i in range(6)]
__UpperCAmelCase : int = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Union[str, Any] = Text("Model" , font_size=24)
__UpperCAmelCase : Optional[Any] = Group(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_)
model.move_to([3, -1.0, 0])
self.add(UpperCamelCase_)
__UpperCAmelCase : str = []
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Any = []
for i, rect in enumerate(UpperCamelCase_):
rect.set_stroke(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(UpperCamelCase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=UpperCamelCase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase_ , buff=0.0)
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase_ , buff=0.0)
self.add(UpperCamelCase_)
model_cpu_arr.append(UpperCamelCase_)
self.add(*UpperCamelCase_ , *UpperCamelCase_ , *UpperCamelCase_)
__UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6)]
__UpperCAmelCase : Dict = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Tuple = Text("Loaded Checkpoint" , font_size=24)
__UpperCAmelCase : Any = Group(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_)
checkpoint.move_to([3, 0.5, 0])
self.add(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : int = []
for i, rect in enumerate(UpperCamelCase_):
__UpperCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCamelCase_ , opacity=0.7)
target.move_to(UpperCamelCase_)
ckpt_arr.append(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.move_to(cpu_right_col_base[i - 5])
ckpt_cpu_arr.append(UpperCamelCase_)
self.add(*UpperCamelCase_ , *UpperCamelCase_)
__UpperCAmelCase : str = Square(side_length=2.2)
key.move_to([-5, 2, 0])
__UpperCAmelCase : int = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
self.add(UpperCamelCase_)
__UpperCAmelCase : Dict = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0])
__UpperCAmelCase : List[Any] = [meta_mem.copy() for i in range(6)]
__UpperCAmelCase : Optional[int] = [meta_mem.copy() for i in range(6)]
__UpperCAmelCase : int = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Union[str, Any] = VGroup(*UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Tuple = VGroup(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0)
__UpperCAmelCase : Optional[int] = Text("Disk" , font_size=24)
__UpperCAmelCase : str = Group(UpperCamelCase_ , UpperCamelCase_).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_)
disk.move_to([-4.0, -1.25, 0])
self.play(Write(UpperCamelCase_ , run_time=3) , Write(UpperCamelCase_ , run_time=1) , Create(UpperCamelCase_ , run_time=1))
__UpperCAmelCase : str = []
for i, rect in enumerate(UpperCamelCase_):
__UpperCAmelCase : Union[str, Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i]).scale(0.5)
animations.append(MoveToTarget(UpperCamelCase_ , run_time=1.5))
self.play(*UpperCamelCase_)
self.play(FadeOut(UpperCamelCase_))
__UpperCAmelCase : Union[str, Any] = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24)
step_a.move_to([2, 2, 0])
self.play(Write(UpperCamelCase_ , run_time=3))
self.play(
FadeOut(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , *UpperCamelCase_) , )
self.wait()
| 487
| 1
|
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
"""simple docstring"""
from math import ceil, sqrt
def lowercase__ ( lowerCAmelCase : int = 1_000_000 ) -> int:
"""simple docstring"""
UpperCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
UpperCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
UpperCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'{solution() = }')
| 373
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717
|
from __future__ import annotations
def _lowercase ( a_ : list[int] ) -> int:
'''simple docstring'''
if not nums:
return 0
__magic_name__ = nums[0]
__magic_name__ = 0
for num in nums[1:]:
__magic_name__, __magic_name__ = (
max_excluding + num,
max(a_ ,a_ ),
)
return max(a_ ,a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184
| 0
|
from collections.abc import Sequence
def lowerCamelCase_ ( lowerCAmelCase__ : Sequence[int] | None = None ) -> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
A = nums[0]
for i in range(1 , len(lowerCAmelCase__ ) ):
A = nums[i]
A = max(lowerCAmelCase__ , ans + num , lowerCAmelCase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__snake_case :str =int(input('Enter number of elements : ').strip())
__snake_case :Optional[int] =list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 106
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("""T""")
class lowerCamelCase_ ( Generic[T] ):
__lowercase : deque[T] # Cache store of keys
__lowercase : set[T] # References of the keys in cache
__lowercase : int = 10 # Maximum capacity of cache
def __init__( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
_UpperCamelCase = deque()
_UpperCamelCase = set()
if not n:
_UpperCamelCase = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
_UpperCamelCase = n
def lowercase ( self , lowerCamelCase_ ) -> None:
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_UpperCamelCase = self.dq_store.pop()
self.key_reference.remove(lowerCamelCase_ )
else:
self.dq_store.remove(lowerCamelCase_ )
self.dq_store.appendleft(lowerCamelCase_ )
self.key_reference.add(lowerCamelCase_ )
def lowercase ( self ) -> None:
"""simple docstring"""
for k in self.dq_store:
print(lowerCamelCase_ )
def __repr__( self ) -> str:
"""simple docstring"""
return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 147
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
_lowercase = TypeVar("""T""")
class UpperCAmelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = data
_lowerCAmelCase = self
_lowerCAmelCase = 0
class UpperCAmelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
_lowerCAmelCase = {}
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = DisjointSetTreeNode(lowerCAmelCase_ )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.map[data]
if elem_ref != elem_ref.parent:
_lowerCAmelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _lowercase ( self , _lowercase , _lowercase ):
"""simple docstring"""
if nodea.rank > nodea.rank:
_lowerCAmelCase = nodea
else:
_lowerCAmelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _lowercase ( self , _lowercase , _lowercase ):
"""simple docstring"""
self.link(self.find_set(lowerCAmelCase_ ) , self.find_set(lowerCAmelCase_ ) )
class UpperCAmelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
_lowerCAmelCase = {}
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if node not in self.connections:
_lowerCAmelCase = {}
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
self.add_node(lowerCAmelCase_ )
self.add_node(lowerCAmelCase_ )
_lowerCAmelCase = weight
_lowerCAmelCase = weight
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _lowercase : x[2] )
# creating the disjoint set
_lowerCAmelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCAmelCase_ )
# MST generation
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = edges[index]
index += 1
_lowerCAmelCase = disjoint_set.find_set(lowerCAmelCase_ )
_lowerCAmelCase = disjoint_set.find_set(lowerCAmelCase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
disjoint_set.union(lowerCAmelCase_ , lowerCAmelCase_ )
return graph
| 717
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def A (__lowerCamelCase :str ):
_lowerCAmelCase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_lowerCAmelCase = 128
elif "12-12" in model_name:
_lowerCAmelCase = 12
_lowerCAmelCase = 12
elif "14-14" in model_name:
_lowerCAmelCase = 14
_lowerCAmelCase = 14
elif "16-16" in model_name:
_lowerCAmelCase = 16
_lowerCAmelCase = 16
else:
raise ValueError("""Model not supported""" )
_lowerCAmelCase = """huggingface/label-files"""
if "speech-commands" in model_name:
_lowerCAmelCase = 35
_lowerCAmelCase = """speech-commands-v2-id2label.json"""
else:
_lowerCAmelCase = 527
_lowerCAmelCase = """audioset-id2label.json"""
_lowerCAmelCase = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def A (__lowerCamelCase :Dict ):
if "module.v" in name:
_lowerCAmelCase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
_lowerCAmelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
_lowerCAmelCase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
_lowerCAmelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
_lowerCAmelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_lowerCAmelCase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
_lowerCAmelCase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
_lowerCAmelCase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any ):
for key in orig_state_dict.copy().keys():
_lowerCAmelCase = orig_state_dict.pop(__lowerCamelCase )
if "qkv" in key:
_lowerCAmelCase = key.split(""".""" )
_lowerCAmelCase = int(key_split[3] )
_lowerCAmelCase = config.hidden_size
if "weight" in key:
_lowerCAmelCase = val[:dim, :]
_lowerCAmelCase = val[dim : dim * 2, :]
_lowerCAmelCase = val[-dim:, :]
else:
_lowerCAmelCase = val[:dim]
_lowerCAmelCase = val[dim : dim * 2]
_lowerCAmelCase = val[-dim:]
else:
_lowerCAmelCase = val
return orig_state_dict
def A (__lowerCamelCase :str ):
_lowerCAmelCase = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :Union[str, Any] , __lowerCamelCase :List[Any]=False ):
_lowerCAmelCase = get_audio_spectrogram_transformer_config(__lowerCamelCase )
_lowerCAmelCase = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
_lowerCAmelCase = model_name_to_url[model_name]
_lowerCAmelCase = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )
# remove some keys
remove_keys(__lowerCamelCase )
# rename some keys
_lowerCAmelCase = convert_state_dict(__lowerCamelCase , __lowerCamelCase )
# load 🤗 model
_lowerCAmelCase = ASTForAudioClassification(__lowerCamelCase )
model.eval()
model.load_state_dict(__lowerCamelCase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_lowerCAmelCase = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
_lowerCAmelCase = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
_lowerCAmelCase = 1024 if """speech-commands""" not in model_name else 128
_lowerCAmelCase = ASTFeatureExtractor(mean=__lowerCamelCase , std=__lowerCamelCase , max_length=__lowerCamelCase )
if "speech-commands" in model_name:
_lowerCAmelCase = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
_lowerCAmelCase = dataset[0]["""audio"""]["""array"""]
else:
_lowerCAmelCase = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
_lowerCAmelCase , _lowerCAmelCase = torchaudio.load(__lowerCamelCase )
_lowerCAmelCase = waveform.squeeze().numpy()
_lowerCAmelCase = feature_extractor(__lowerCamelCase , sampling_rate=16000 , return_tensors="""pt""" )
# forward pass
_lowerCAmelCase = model(**__lowerCamelCase )
_lowerCAmelCase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_lowerCAmelCase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_lowerCAmelCase = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_lowerCAmelCase = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_lowerCAmelCase = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_lowerCAmelCase = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_lowerCAmelCase = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_lowerCAmelCase = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
_lowerCAmelCase = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 162
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_2_8, """min_length""": 1_2, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_4_2, """min_length""": 5_6, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 6_2, """min_length""": 1_1, """num_beams""": 6},
}
}
snake_case_ : int = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_2_8,
"""task_specific_params.summarization.min_length""": 1_2,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_4_2,
"""task_specific_params.summarization_cnn.min_length""": 5_6,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 6_2,
"""task_specific_params.summarization_xsum.min_length""": 1_1,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_lowercase ) , x.transpose() ) )
snake_case_ : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : int = np.random.randn(3 , 4 )
snake_case_ : Union[str, Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
snake_case_ : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = np.random.randn(3 , 4 )
snake_case_ : Union[str, Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
snake_case_ : List[Any] = np.random.randn(3 , 4 , 5 )
snake_case_ : int = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = np.random.randn(3 , 4 )
snake_case_ : Tuple = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , np.asarray(transpose(_lowercase ) ) ) )
snake_case_ : Any = np.random.randn(3 , 4 , 5 )
snake_case_ : List[str] = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(_lowercase , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.reshape(_lowercase , (4, 3) ) ) )
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , np.reshape(_lowercase , (1_2, 5) ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 )
snake_case_ : Optional[int] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
snake_case_ : Dict = np.random.randn(3 , 4 , 5 )
snake_case_ : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , reshape(_lowercase , (1_2, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = np.random.randn(3 , 4 )
snake_case_ : Dict = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
snake_case_ : Optional[int] = np.random.randn(3 , 4 , 5 )
snake_case_ : Tuple = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , reshape(_lowercase , (1_2, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = np.random.randn(3 , 4 )
snake_case_ : List[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.asarray(reshape(_lowercase , (4, 3) ) ) ) )
snake_case_ : Optional[int] = np.random.randn(3 , 4 , 5 )
snake_case_ : List[str] = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (1_2, 5) ) , np.asarray(reshape(_lowercase , (1_2, 5) ) ) ) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.squeeze(_lowercase ) ) )
snake_case_ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.squeeze(_lowercase , axis=2 ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(1 , 3 , 4 )
snake_case_ : Dict = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
snake_case_ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
snake_case_ : Optional[int] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(1 , 3 , 4 )
snake_case_ : Optional[Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
snake_case_ : Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case_ : int = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = np.random.randn(1 , 3 , 4 )
snake_case_ : Any = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.asarray(squeeze(_lowercase ) ) ) )
snake_case_ : Tuple = np.random.randn(1 , 4 , 1 , 5 )
snake_case_ : Any = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.asarray(squeeze(_lowercase , axis=2 ) ) ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.expand_dims(_lowercase , axis=1 ) ) )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = np.random.randn(3 , 4 )
snake_case_ : List[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = np.random.randn(3 , 4 )
snake_case_ : List[Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = np.random.randn(3 , 4 )
snake_case_ : List[str] = jnp.array(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.asarray(expand_dims(_lowercase , axis=1 ) ) ) )
| 58
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 159
| 0
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def a ( snake_case__: str , snake_case__: Optional[Any] ):
'''simple docstring'''
warnings.warn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
requires_backends(SCREAMING_SNAKE_CASE_ , '''sklearn''' )
return (preds == labels).mean()
def a ( snake_case__: Optional[int] , snake_case__: Optional[Any] ):
'''simple docstring'''
warnings.warn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
requires_backends(SCREAMING_SNAKE_CASE_ , '''sklearn''' )
lowercase_ = simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = fa_score(y_true=SCREAMING_SNAKE_CASE_ , y_pred=SCREAMING_SNAKE_CASE_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def a ( snake_case__: Optional[Any] , snake_case__: List[Any] ):
'''simple docstring'''
warnings.warn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
requires_backends(SCREAMING_SNAKE_CASE_ , '''sklearn''' )
lowercase_ = pearsonr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = spearmanr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def a ( snake_case__: Dict , snake_case__: int , snake_case__: int ):
'''simple docstring'''
warnings.warn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
requires_backends(SCREAMING_SNAKE_CASE_ , '''sklearn''' )
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), F'''Predictions and labels have mismatched lengths {len(SCREAMING_SNAKE_CASE_ )} and {len(SCREAMING_SNAKE_CASE_ )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "mrpc":
return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif task_name == "sts-b":
return pearson_and_spearman(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif task_name == "qqp":
return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
else:
raise KeyError(SCREAMING_SNAKE_CASE_ )
def a ( snake_case__: Tuple , snake_case__: str , snake_case__: Optional[Any] ):
'''simple docstring'''
warnings.warn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
requires_backends(SCREAMING_SNAKE_CASE_ , '''sklearn''' )
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'''Predictions and labels have mismatched lengths {len(SCREAMING_SNAKE_CASE_ )} and {len(SCREAMING_SNAKE_CASE_ )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
else:
raise KeyError(SCREAMING_SNAKE_CASE_ )
| 715
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :str = 'efficientformer'
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] = [3, 2, 6, 4] , SCREAMING_SNAKE_CASE_ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , SCREAMING_SNAKE_CASE_ : List[bool] = [True, True, True, True] , SCREAMING_SNAKE_CASE_ : int = 4_4_8 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 4 , SCREAMING_SNAKE_CASE_ : int = 7 , SCREAMING_SNAKE_CASE_ : int = 5 , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 4 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : int = 1_6 , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : float = 1e-5 , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1e-12 , SCREAMING_SNAKE_CASE_ : int = 2_2_4 , SCREAMING_SNAKE_CASE_ : float = 1e-05 , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = hidden_sizes
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = depths
lowercase_ = mlp_expansion_ratio
lowercase_ = downsamples
lowercase_ = dim
lowercase_ = key_dim
lowercase_ = attention_ratio
lowercase_ = resolution
lowercase_ = pool_size
lowercase_ = downsample_patch_size
lowercase_ = downsample_stride
lowercase_ = downsample_pad
lowercase_ = drop_path_rate
lowercase_ = num_metaad_blocks
lowercase_ = distillation
lowercase_ = use_layer_scale
lowercase_ = layer_scale_init_value
lowercase_ = image_size
lowercase_ = batch_norm_eps
| 409
| 0
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
__A = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
snake_case_ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = object_detector(examples[0] , threshold=0.0 )
snake_case_ = len(__UpperCamelCase )
self.assertGreater(__UpperCamelCase , 0 )
self.assertEqual(
__UpperCamelCase , [
{
'score': ANY(__UpperCamelCase ),
'label': ANY(__UpperCamelCase ),
'box': {'xmin': ANY(__UpperCamelCase ), 'ymin': ANY(__UpperCamelCase ), 'xmax': ANY(__UpperCamelCase ), 'ymax': ANY(__UpperCamelCase )},
}
for i in range(__UpperCamelCase )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@require_torch
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
snake_case_ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
snake_case_ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = pipeline('zero-shot-object-detection' )
snake_case_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
snake_case_ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@require_torch
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 0.2
snake_case_ = pipeline('zero-shot-object-detection' )
snake_case_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 2
snake_case_ = pipeline('zero-shot-object-detection' )
snake_case_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 187
|
from itertools import count
def a(lowercase__ = 50 ):
'''simple docstring'''
snake_case_ = [1] * min_block_length
for n in count(lowercase__ ):
fill_count_functions.append(1 )
for block_length in range(lowercase__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1000000:
break
return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 187
| 1
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__UpperCAmelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__UpperCAmelCase = typing.Union[np.floataa, int, float] # noqa: UP007
def snake_case_ (__A : Vector , __A : Vector ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(__A ) - np.asarray(__A )) ** 2 ) )
def snake_case_ (__A : Vector , __A : Vector ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(__A , __A ) ) ** (1 / 2)
if __name__ == "__main__":
def snake_case_ () -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_0_0_0_0 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
| 218
|
def snake_case_ (__A : int = 1_0**9 ) -> int:
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Dict = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__lowerCAmelCase : int = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
| 218
| 1
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def UpperCamelCase ( lowercase_ : str ) -> Dict:
'''simple docstring'''
def decorator(lowercase_ : Optional[Any] ):
lowercase =getattr(lowercase_ , '''handle_key''' , [] )
handle += [key]
setattr(lowercase_ , '''handle_key''' , lowercase_ )
return func
return decorator
def UpperCamelCase ( *lowercase_ : List[str] ) -> List[Any]:
'''simple docstring'''
def decorator(lowercase_ : Optional[int] ):
lowercase =getattr(lowercase_ , '''handle_key''' , [] )
handle += keys
setattr(lowercase_ , '''handle_key''' , lowercase_ )
return func
return decorator
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
def __new__( cls , snake_case_ , snake_case_ , snake_case_ ):
lowercase =super().__new__(cls , snake_case_ , snake_case_ , snake_case_ )
if not hasattr(snake_case_ , '''key_handler''' ):
setattr(snake_case_ , '''key_handler''' , {} )
setattr(snake_case_ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase =getattr(snake_case_ , '''handle_key''' , [] )
for key in handled_keys:
lowercase =value
return new_cls
@staticmethod
def _A( cls ):
lowercase =get_character()
if char != KEYMAP["undefined"]:
lowercase =ord(snake_case_ )
lowercase =cls.key_handler.get(snake_case_ )
if handler:
lowercase =char
return handler(cls )
else:
return None
def UpperCamelCase ( cls : Optional[Any] ) -> Any:
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 72
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Any = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'marian'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , snake_case_=5_81_01 , snake_case_=None , snake_case_=10_24 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=10_24 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=5_81_00 , snake_case_=False , snake_case_=5_81_00 , snake_case_=0 , snake_case_=0 , snake_case_=True , **snake_case_ , ):
lowercase =vocab_size
lowercase =decoder_vocab_size or vocab_size
lowercase =max_position_embeddings
lowercase =d_model
lowercase =encoder_ffn_dim
lowercase =encoder_layers
lowercase =encoder_attention_heads
lowercase =decoder_ffn_dim
lowercase =decoder_layers
lowercase =decoder_attention_heads
lowercase =dropout
lowercase =attention_dropout
lowercase =activation_dropout
lowercase =activation_function
lowercase =init_std
lowercase =encoder_layerdrop
lowercase =decoder_layerdrop
lowercase =use_cache
lowercase =encoder_layers
lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase ={0: '''batch'''}
lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super().outputs
else:
lowercase =super(snake_case_ , self ).outputs
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Generate decoder inputs
lowercase =seq_length if not self.use_past else 1
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
lowercase ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase =dict(**snake_case_ , **snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
lowercase =common_inputs['''decoder_input_ids'''].shape[1]
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =decoder_seq_length + 3
lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(snake_case_ , snake_case_ )] , dim=1 )
lowercase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase , lowercase =self.num_layers
lowercase =min(snake_case_ , snake_case_ )
lowercase =max(snake_case_ , snake_case_ ) - min_num_layers
lowercase ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(snake_case_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
) )
# TODO: test this.
lowercase =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(snake_case_ , snake_case_ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase =seqlen + 2
lowercase , lowercase =self.num_layers
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =common_inputs['''attention_mask'''].dtype
lowercase =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
lowercase =[
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(snake_case_ )
]
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase =tokenizer.num_special_tokens_to_add(snake_case_ )
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
lowercase =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase =dict(tokenizer(snake_case_ , return_tensors=snake_case_ ) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
else:
lowercase =self._generate_dummy_inputs_for_causal_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
return common_inputs
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super()._flatten_past_key_values_(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
lowercase =super(snake_case_ , self )._flatten_past_key_values_(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
@property
def _A( self ):
return 1E-4
| 72
| 1
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_lowercase : Tuple = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Any:
lowercase_ : Optional[Any] = {}
state_dict.pop("""pixel_mean""" , __snake_case )
state_dict.pop("""pixel_std""" , __snake_case )
lowercase_ : int = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase_ : int = key.replace(__snake_case , __snake_case )
if re.match(__snake_case , __snake_case ):
lowercase_ : Optional[Any] = int(re.match(__snake_case , __snake_case ).group(2 ) )
if layer_nb == 0:
lowercase_ : Tuple = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
lowercase_ : List[Any] = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
lowercase_ : int = key.replace("""layers.2""" , """proj_out""" )
lowercase_ : Any = value
lowercase_ : List[Any] = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]="ybelkada/segment-anything" ) -> str:
lowercase_ : int = hf_hub_download(__snake_case , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
lowercase_ : List[Any] = SamConfig()
elif "sam_vit_l" in model_name:
lowercase_ : Any = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowercase_ : Dict = SamConfig(
vision_config=__snake_case , )
elif "sam_vit_h" in model_name:
lowercase_ : Optional[Any] = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowercase_ : Optional[Any] = SamConfig(
vision_config=__snake_case , )
lowercase_ : Optional[Any] = torch.load(__snake_case , map_location="""cpu""" )
lowercase_ : List[str] = replace_keys(__snake_case )
lowercase_ : List[str] = SamImageProcessor()
lowercase_ : Any = SamProcessor(image_processor=__snake_case )
lowercase_ : Dict = SamModel(__snake_case )
hf_model.load_state_dict(__snake_case )
lowercase_ : Optional[Any] = hf_model.to("""cuda""" )
lowercase_ : Dict = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
lowercase_ : List[Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("""RGB""" )
lowercase_ : Tuple = [[[400, 650]]]
lowercase_ : Tuple = [[1]]
lowercase_ : str = processor(images=np.array(__snake_case ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : Tuple = hf_model(**__snake_case )
lowercase_ : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
lowercase_ : Any = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : List[Any] = hf_model(**__snake_case )
lowercase_ : str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
lowercase_ : Tuple = ((75, 275, 1725, 850),)
lowercase_ : str = processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : Optional[int] = hf_model(**__snake_case )
lowercase_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
lowercase_ : Dict = [[[400, 650], [800, 650]]]
lowercase_ : Optional[Any] = [[1, 1]]
lowercase_ : str = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : Any = hf_model(**__snake_case )
lowercase_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
_lowercase : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_lowercase : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 712
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
| 30
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _lowerCamelCase( ) -> Any:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=__snake_case , default=__snake_case , required=__snake_case , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=__snake_case , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=__snake_case , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=__snake_case , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=__snake_case , default=0 , help="cuda_id." , )
__snake_case = parser.parse_args()
return args
def _lowerCamelCase( __snake_case , __snake_case , __snake_case ) -> List[str]:
if not len(__snake_case ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
__snake_case , __snake_case = imgs[0].size
__snake_case = Image.new("RGB" , size=(cols * w, rows * h) )
__snake_case , __snake_case = grid.size
for i, img in enumerate(__snake_case ):
grid.paste(__snake_case , box=(i % cols * w, i // cols * h) )
return grid
def _lowerCamelCase( __snake_case , __snake_case="robotic cat with wings" , __snake_case=7.5 , __snake_case=50 , __snake_case=1 , __snake_case=42 , ) -> Any:
__snake_case = torch.Generator(pipeline.device ).manual_seed(__snake_case )
__snake_case = pipeline(
__snake_case , guidance_scale=__snake_case , num_inference_steps=__snake_case , generator=__snake_case , num_images_per_prompt=__snake_case , ).images
__snake_case = int(math.sqrt(__snake_case ) )
__snake_case = image_grid(__snake_case , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
lowerCamelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
lowerCamelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowerCamelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowerCamelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowerCamelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowerCamelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowerCamelCase__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowerCamelCase__ = unet.to(torch.device('cuda', args.cuda_id))
lowerCamelCase__ = pipeline.to(unet.device)
lowerCamelCase__ , lowerCamelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowerCamelCase__ = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 524
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 524
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowercase : Optional[Any] = cha + cha
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 710
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowercase : Optional[Any] = cha + cha
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4
| 0
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
SCREAMING_SNAKE_CASE_ = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
SCREAMING_SNAKE_CASE_ = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def A__ ( A__ , A__ ) -> List[Any]:
'''simple docstring'''
return float((preds == labels).mean() )
def A__ ( A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )
_UpperCAmelCase = float(fa_score(y_true=UpperCAmelCase__ , y_pred=UpperCAmelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def A__ ( A__ , A__ ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = np.array(UpperCAmelCase__ )
_UpperCAmelCase = np.array(UpperCAmelCase__ )
_UpperCAmelCase = en_sentvecs.shape[0]
# mean centering
_UpperCAmelCase = en_sentvecs - np.mean(UpperCAmelCase__ , axis=0 )
_UpperCAmelCase = in_sentvecs - np.mean(UpperCAmelCase__ , axis=0 )
_UpperCAmelCase = cdist(UpperCAmelCase__ , UpperCAmelCase__ , "cosine" )
_UpperCAmelCase = np.array(range(UpperCAmelCase__ ) )
_UpperCAmelCase = sim.argsort(axis=1 )[:, :10]
_UpperCAmelCase = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> Dict:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def __A ( self , snake_case_ , snake_case_ ) -> int:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__UpperCamelCase , __UpperCamelCase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__UpperCamelCase , __UpperCamelCase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 426
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class __A( UpperCAmelCase ):
SCREAMING_SNAKE_CASE = '''gpt_neox_japanese'''
def __init__( self : Union[str, Any] , __UpperCamelCase : str=3_2_0_0_0 , __UpperCamelCase : List[Any]=2_5_6_0 , __UpperCamelCase : Any=3_2 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : List[Any]=1.00 , __UpperCamelCase : Any=1_0_0_0_0 , __UpperCamelCase : Optional[Any]=2_0_4_8 , __UpperCamelCase : Tuple=0.02 , __UpperCamelCase : List[str]=1E-5 , __UpperCamelCase : str=True , __UpperCamelCase : str=3_1_9_9_6 , __UpperCamelCase : int=3_1_9_9_9 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Tuple=0.0 , **__UpperCamelCase : List[str] , ):
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_multiple_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = rotary_pct
lowerCamelCase_ = rotary_emb_base
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = use_cache
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = hidden_dropout
| 272
| 0
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''width_multiplier''' ) )
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : int , _snake_case : Optional[int]=13 , _snake_case : Any=64 , _snake_case : Union[str, Any]=2 , _snake_case : Any=3 , _snake_case : Dict="swish" , _snake_case : List[Any]=3 , _snake_case : int=32 , _snake_case : str=0.1 , _snake_case : Tuple=0.02 , _snake_case : List[Any]=True , _snake_case : Any=True , _snake_case : int=10 , _snake_case : Optional[int]=None , _snake_case : Any=0.25 , _snake_case : Any=0.0 , _snake_case : Optional[int]=0.0 , ):
__lowercase : int = parent
__lowercase : int = batch_size
__lowercase : Tuple = image_size
__lowercase : Optional[Any] = patch_size
__lowercase : Optional[Any] = num_channels
__lowercase : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
__lowercase : Tuple = hidden_act
__lowercase : int = conv_kernel_size
__lowercase : List[str] = output_stride
__lowercase : Union[str, Any] = classifier_dropout_prob
__lowercase : Union[str, Any] = use_labels
__lowercase : Any = is_training
__lowercase : List[Any] = num_labels
__lowercase : str = initializer_range
__lowercase : str = scope
__lowercase : List[Any] = width_multiplier
__lowercase : List[Any] = ffn_dropout
__lowercase : Optional[Any] = attn_dropout
def snake_case_ ( self : Optional[int] ):
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : int = None
__lowercase : int = None
if self.use_labels:
__lowercase : int = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self : Tuple ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def snake_case_ ( self : List[str] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : int ):
__lowercase : Tuple = MobileViTVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : Optional[int] , _snake_case : int , _snake_case : Any , _snake_case : str , _snake_case : Tuple ):
__lowercase : List[Any] = self.num_labels
__lowercase : Union[str, Any] = MobileViTVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Any , _snake_case : int , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple ):
__lowercase : Union[str, Any] = self.num_labels
__lowercase : Optional[int] = MobileViTVaForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : Any = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : List[str] ):
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : str = config_and_inputs
__lowercase : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A__ : Optional[int] = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A__ : Tuple = False
A__ : Dict = False
A__ : List[Any] = False
A__ : int = False
def snake_case_ ( self : str ):
__lowercase : Any = MobileViTVaModelTester(self )
__lowercase : Optional[int] = MobileViTVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def snake_case_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def snake_case_ ( self : List[Any] ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def snake_case_ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def snake_case_ ( self : Optional[int] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def snake_case_ ( self : int ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case_ ( self : Tuple ):
pass
def snake_case_ ( self : int ):
__lowercase , __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Union[str, Any] = model_class(_UpperCAmelCase )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Tuple = [*signature.parameters.keys()]
__lowercase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def snake_case_ ( self : List[Any] ):
def check_hidden_states_output(_snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : int ):
__lowercase : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowercase : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : Optional[int] = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase : Optional[int] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : List[Any] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def snake_case_ ( self : int ):
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def snake_case_ ( self : int ):
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def snake_case_ ( self : Optional[int] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Tuple = MobileViTVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase_ ( ) -> List[Any]:
__lowercase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self : int ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_UpperCAmelCase )
__lowercase : Any = self.default_image_processor
__lowercase : Optional[int] = prepare_img()
__lowercase : int = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowercase : Tuple = model(**_UpperCAmelCase )
# verify the logits
__lowercase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__lowercase : str = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def snake_case_ ( self : List[str] ):
__lowercase : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase : Union[str, Any] = model.to(_UpperCAmelCase )
__lowercase : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase : List[str] = prepare_img()
__lowercase : List[Any] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowercase : Optional[Any] = model(**_UpperCAmelCase )
__lowercase : int = outputs.logits
# verify the logits
__lowercase : Any = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__lowercase : Dict = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def snake_case_ ( self : Optional[int] ):
__lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase : List[str] = model.to(_UpperCAmelCase )
__lowercase : List[Any] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowercase : str = model(**_UpperCAmelCase )
__lowercase : Optional[Any] = outputs.logits.detach().cpu()
__lowercase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__lowercase : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__lowercase : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__lowercase : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 714
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , _snake_case : Optional[Any] , _snake_case : Union[str, Any]=13 , _snake_case : Optional[Any]=32 , _snake_case : str=2 , _snake_case : Optional[Any]=3 , _snake_case : Tuple=16 , _snake_case : Optional[int]=[1, 2, 1] , _snake_case : Dict=[2, 2, 4] , _snake_case : int=2 , _snake_case : Any=2.0 , _snake_case : Dict=True , _snake_case : Optional[Any]=0.0 , _snake_case : Any=0.0 , _snake_case : str=0.1 , _snake_case : List[Any]="gelu" , _snake_case : str=False , _snake_case : Optional[int]=True , _snake_case : Dict=0.02 , _snake_case : List[Any]=1E-5 , _snake_case : Union[str, Any]=True , _snake_case : int=None , _snake_case : Optional[Any]=True , _snake_case : Optional[Any]=10 , _snake_case : List[Any]=8 , ):
__lowercase : str = parent
__lowercase : Union[str, Any] = batch_size
__lowercase : int = image_size
__lowercase : int = patch_size
__lowercase : Any = num_channels
__lowercase : Optional[int] = embed_dim
__lowercase : List[str] = depths
__lowercase : List[str] = num_heads
__lowercase : Optional[Any] = window_size
__lowercase : Union[str, Any] = mlp_ratio
__lowercase : int = qkv_bias
__lowercase : Tuple = hidden_dropout_prob
__lowercase : List[str] = attention_probs_dropout_prob
__lowercase : Union[str, Any] = drop_path_rate
__lowercase : str = hidden_act
__lowercase : Optional[Any] = use_absolute_embeddings
__lowercase : Union[str, Any] = patch_norm
__lowercase : Any = layer_norm_eps
__lowercase : int = initializer_range
__lowercase : Optional[Any] = is_training
__lowercase : str = scope
__lowercase : Any = use_labels
__lowercase : Union[str, Any] = type_sequence_label_size
__lowercase : Union[str, Any] = encoder_stride
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Optional[int] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self : Tuple ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case_ ( self : int , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : int ):
__lowercase : int = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : Dict = model(_snake_case )
__lowercase : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case_ ( self : str , _snake_case : List[Any] , _snake_case : str , _snake_case : str ):
__lowercase : List[Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : str = model(_snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowercase : Optional[Any] = 1
__lowercase : int = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase : Union[str, Any] = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_ ( self : Dict , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Optional[Any] ):
__lowercase : Any = self.type_sequence_label_size
__lowercase : Optional[Any] = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
__lowercase : List[str] = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self : Optional[int] ):
__lowercase : int = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase : Optional[int] = config_and_inputs
__lowercase : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A__ : Dict = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
A__ : Any = False
A__ : List[str] = False
A__ : int = False
A__ : Tuple = False
def snake_case_ ( self : List[Any] ):
__lowercase : Optional[int] = SwinvaModelTester(self )
__lowercase : List[Any] = ConfigTester(self , config_class=_snake_case , embed_dim=37 )
def snake_case_ ( self : Optional[Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self : List[str] ):
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def snake_case_ ( self : Optional[int] ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def snake_case_ ( self : Optional[int] ):
pass
def snake_case_ ( self : Union[str, Any] ):
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def snake_case_ ( self : List[Any] ):
__lowercase , __lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = model_class(_snake_case )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : str = [*signature.parameters.keys()]
__lowercase : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def snake_case_ ( self : List[Any] ):
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Dict = True
for model_class in self.all_model_classes:
__lowercase : List[Any] = True
__lowercase : Dict = False
__lowercase : Any = True
__lowercase : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__lowercase : Any = outputs.attentions
__lowercase : List[str] = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) , _snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : List[str] = True
__lowercase : List[Any] = config.window_size**2
__lowercase : int = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__lowercase : str = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__lowercase : Any = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__lowercase : List[Any] = len(_snake_case )
# Check attention is always last and order is fine
__lowercase : Dict = True
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__lowercase : Dict = model(**self._prepare_for_class(_snake_case , _snake_case ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
__lowercase : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__lowercase : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(_snake_case ) )
__lowercase : Any = outputs.attentions
self.assertEqual(len(_snake_case ) , _snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def snake_case_ ( self : str , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : str ):
__lowercase : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__lowercase : Optional[int] = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__lowercase : Union[str, Any] = outputs.hidden_states
__lowercase : Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) , _snake_case )
# Swinv2 has a different seq_length
__lowercase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowercase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) , _snake_case )
__lowercase , __lowercase , __lowercase , __lowercase : str = reshaped_hidden_states[0].shape
__lowercase : str = (
reshaped_hidden_states[0].view(_snake_case , _snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case_ ( self : int ):
__lowercase , __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase : List[str] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : str = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case )
def snake_case_ ( self : List[Any] ):
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Any = 3
__lowercase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase : List[Any] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) )
def snake_case_ ( self : Optional[int] ):
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def snake_case_ ( self : Optional[int] ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def snake_case_ ( self : Dict ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def snake_case_ ( self : str ):
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[int] = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
__lowercase : Tuple = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self : str ):
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self : Optional[Any] ):
__lowercase : Optional[Any] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
__lowercase : int = self.default_image_processor
__lowercase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__lowercase : Optional[int] = image_processor(images=_snake_case , return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
__lowercase : Optional[int] = model(**_snake_case )
# verify the logits
__lowercase : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
__lowercase : Optional[Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
| 284
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 394
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowerCAmelCase_ ( a : Tuple ):
a__ = int(a )
a__ , a__ , a__ = t // 3600, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def lowerCAmelCase_ ( a : Union[str, Any] , a : int , a : Optional[int] , a : List[Any] , a : Union[str, Any]=300 ):
# docstyle-ignore
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def lowerCAmelCase_ ( a : Tuple ):
a__ = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
a__ = f'''{elt:.6f}''' if isinstance(a , a ) else str(a )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _UpperCamelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE:Tuple = 5
SCREAMING_SNAKE_CASE:Dict = 0.2
def __init__( self , _a , _a = None , _a = True , _a = None , _a = 300 , ):
"""simple docstring"""
a__ = total
a__ = '' if prefix is None else prefix
a__ = leave
a__ = parent
a__ = width
a__ = None
a__ = None
a__ = None
def lowercase__ ( self , _a , _a = False , _a = None ):
"""simple docstring"""
a__ = value
if comment is not None:
a__ = comment
if self.last_value is None:
a__ = a__ = time.time()
a__ = a__ = value
a__ = a__ = None
a__ = self.warmup
a__ = 1
self.update_bar(_a )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
a__ = time.time()
a__ = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
a__ = self.elapsed_time / (value - self.start_value)
else:
a__ = None
if value >= self.total:
a__ = self.total
a__ = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
a__ = self.average_time_per_item * (self.total - value)
self.update_bar(_a )
a__ = value
a__ = current_time
if self.average_time_per_item is None:
a__ = 1
else:
a__ = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowercase__ ( self , _a , _a=None ):
"""simple docstring"""
a__ = ' ' * (len(str(self.total ) ) - len(str(_a ) )) + str(_a )
if self.elapsed_time is None:
a__ = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
a__ = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
a__ = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowercase__ ( self ):
"""simple docstring"""
a__ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
a__ = disp.display(disp.HTML(self.html_code ) , display_id=_a )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowercase__ ( self ):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self , _a , _a=None ):
"""simple docstring"""
super().__init__(_a )
a__ = None if column_names is None else [column_names]
a__ = None
def lowercase__ ( self ):
"""simple docstring"""
a__ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
a__ = disp.display(disp.HTML(self.html_code ) , display_id=_a )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowercase__ ( self , _a ):
"""simple docstring"""
if self.inner_table is None:
a__ = [list(values.keys() ), list(values.values() )]
else:
a__ = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_a )
a__ = columns
self.inner_table.append([values[c] for c in columns] )
def lowercase__ ( self , _a , _a=None , _a=300 ):
"""simple docstring"""
a__ = NotebookProgressBar(_a , prefix=_a , parent=self , width=_a )
return self.child_bar
def lowercase__ ( self ):
"""simple docstring"""
a__ = None
self.display()
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
a__ = None
a__ = None
a__ = False
def lowercase__ ( self , _a , _a , _a , **_a ):
"""simple docstring"""
a__ = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
a__ = 0
a__ = 0
a__ = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
a__ = NotebookTrainingTracker(state.max_steps , _a )
def lowercase__ ( self , _a , _a , _a , **_a ):
"""simple docstring"""
a__ = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
a__ = False
def lowercase__ ( self , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
if not has_length(_a ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
a__ = self.training_tracker.add_child(len(_a ) )
else:
a__ = NotebookProgressBar(len(_a ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowercase__ ( self , _a , _a , _a , **_a ):
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
a__ = None
def lowercase__ ( self , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
a__ = {'Training Loss': logs['loss']}
# First column is necessarily Step sine we're not in epoch eval strategy
a__ = state.global_step
self.training_tracker.write_line(_a )
def lowercase__ ( self , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
if self.training_tracker is not None:
a__ = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
a__ = log['loss']
break
if self.first_column == "Epoch":
a__ = int(state.epoch )
else:
a__ = state.global_step
a__ = 'eval'
for k in metrics:
if k.endswith('_loss' ):
a__ = re.sub(r'\_loss$' , '' , _a )
a__ = metrics.pop('total_flos' , _a )
a__ = metrics.pop('epoch' , _a )
a__ = metrics.pop(F'''{metric_key_prefix}_runtime''' , _a )
a__ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , _a )
a__ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , _a )
a__ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , _a )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
a__ = v
else:
a__ = k.split('_' )
a__ = ' '.join([part.capitalize() for part in splits[1:]] )
a__ = v
self.training_tracker.write_line(_a )
self.training_tracker.remove_child()
a__ = None
# Evaluation takes a long time so we should force the next update.
a__ = True
def lowercase__ ( self , _a , _a , _a , **_a ):
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=_a )
a__ = None
| 394
| 1
|
"""simple docstring"""
def A ( snake_case__ = 50 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
| 616
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 616
| 1
|
'''simple docstring'''
def lowercase__( _UpperCamelCase : Union[str, Any] )-> Tuple:
"""simple docstring"""
_UpperCamelCase = [0] * len(_UpperCamelCase )
_UpperCamelCase = []
_UpperCamelCase = [1] * len(_UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCamelCase )
while queue:
_UpperCamelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
_UpperCamelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_UpperCamelCase )
print(max(_UpperCamelCase ) )
# Adjacency list of Graph
snake_case_ : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 138
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
snake_case_ : Optional[int] = '''scheduler_config.json'''
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = 5
@dataclass
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = 42
class A_ :
'''simple docstring'''
_lowerCAmelCase = SCHEDULER_CONFIG_NAME
_lowerCAmelCase = ["""dtype"""]
_lowerCAmelCase = []
_lowerCAmelCase = True
@classmethod
def a ( cls , A_ = None , A_ = None , A_=False , **A_ , ):
_UpperCamelCase , _UpperCamelCase = cls.load_config(
pretrained_model_name_or_path=A_ , subfolder=A_ , return_unused_kwargs=A_ , **A_ , )
_UpperCamelCase , _UpperCamelCase = cls.from_config(A_ , return_unused_kwargs=A_ , **A_ )
if hasattr(A_ , "create_state" ) and getattr(A_ , "has_state" , A_ ):
_UpperCamelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def a ( self , A_ , A_ = False , **A_ ):
self.save_config(save_directory=A_ , push_to_hub=A_ , **A_ )
@property
def a ( self ):
return self._get_compatibles()
@classmethod
def a ( cls ):
_UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
_UpperCamelCase = importlib.import_module(__name__.split("." )[0] )
_UpperCamelCase = [
getattr(A_ , A_ ) for c in compatible_classes_str if hasattr(A_ , A_ )
]
return compatible_classes
def lowercase__( _UpperCamelCase : jnp.ndarray , _UpperCamelCase : Tuple[int] )-> jnp.ndarray:
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def lowercase__( _UpperCamelCase : int , _UpperCamelCase : Tuple=0.999 , _UpperCamelCase : Any=jnp.floataa )-> jnp.ndarray:
"""simple docstring"""
def alpha_bar(_UpperCamelCase : Any ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
_UpperCamelCase = []
for i in range(_UpperCamelCase ):
_UpperCamelCase = i / num_diffusion_timesteps
_UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
@flax.struct.dataclass
class A_ :
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
@classmethod
def a ( cls , A_ ):
_UpperCamelCase = scheduler.config
if config.trained_betas is not None:
_UpperCamelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_UpperCamelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCamelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCamelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
_UpperCamelCase = 1.0 - betas
_UpperCamelCase = jnp.cumprod(A_ , axis=0 )
return cls(
alphas=A_ , betas=A_ , alphas_cumprod=A_ , )
def lowercase__( _UpperCamelCase : CommonSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray )-> List[Any]:
"""simple docstring"""
_UpperCamelCase = state.alphas_cumprod
_UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
_UpperCamelCase = sqrt_alpha_prod.flatten()
_UpperCamelCase = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
_UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
_UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
_UpperCamelCase = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def lowercase__( _UpperCamelCase : CommonSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray )-> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def lowercase__( _UpperCamelCase : CommonSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray )-> Any:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_UpperCamelCase = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 138
| 1
|
def lowerCAmelCase__(__snake_case ,__snake_case ) -> int:
'''simple docstring'''
if len(__snake_case ) != len(__snake_case ):
raise ValueError('''String lengths must match!''' )
lowerCamelCase__ = 0
for chara, chara in zip(__snake_case ,__snake_case ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """ClapFeatureExtractor"""
lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if audios is not None:
lowerCamelCase__ = self.feature_extractor(
__lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and audios is not None:
lowerCamelCase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def A ( __UpperCamelCase ) -> List[str]: # picklable for multiprocessing
return x.sum()
def A ( __UpperCamelCase ) -> Optional[Any]: # picklable for multiprocessing
return i + 1
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : int
A__ : str
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A__ = {}
A__ = []
A__ = 1
A__ = [1, 2]
A__ = {'a': 1, 'b': 2}
A__ = {'a': [1, 2], 'b': [3, 4]}
A__ = {'a': {'1': 1}, 'b': 2}
A__ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
A__ = {}
A__ = []
A__ = 2
A__ = [2, 3]
A__ = {'a': 2, 'b': 3}
A__ = {'a': [2, 3], 'b': [4, 5]}
A__ = {'a': {'1': 2}, 'b': 3}
A__ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case ) , _snake_case )
A__ = 2
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(map_nested(_snake_case , _snake_case , num_proc=_snake_case ) , _snake_case )
A__ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
A__ = {'a': 2, 'b': 0, 'c': 2}
A__ = {
'a': np.eye(2 ).astype(_snake_case ),
'b': np.zeros(3 ).astype(_snake_case ),
'c': np.ones(2 ).astype(_snake_case ),
}
self.assertEqual(map_nested(_snake_case , _snake_case , map_numpy=_snake_case ) , _snake_case )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_snake_case , _snake_case , map_numpy=_snake_case ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_snake_case , _snake_case , map_numpy=_snake_case , num_proc=_snake_case ) , _snake_case )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_snake_case , _snake_case , map_numpy=_snake_case , num_proc=_snake_case ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_snake_case ): # can't pickle a local lambda
map_nested(lambda _snake_case : x + 1 , _snake_case , num_proc=_snake_case )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = {'a': 1, 'b': 2}
A__ = {'a': 3, 'b': 4}
A__ = {'a': 5, 'b': 6}
A__ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_snake_case , _snake_case , _snake_case ) ) , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
class __lowerCAmelCase :
"""simple docstring"""
A__ : int = "bar"
A__ = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(_snake_case , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
A__ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A__ = map_nested(lambda __UpperCamelCase : x + 10 , __UpperCamelCase , num_proc=__UpperCamelCase , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@require_tf
def _a ( self : Optional[int] ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
A__ = layers.Dense(2 )
def gen_random_output():
A__ = tf.random.uniform((1, 3) )
return model(_snake_case ).numpy()
with temp_seed(42 , set_tensorflow=_snake_case ):
A__ = gen_random_output()
with temp_seed(42 , set_tensorflow=_snake_case ):
A__ = gen_random_output()
A__ = gen_random_output()
np.testing.assert_equal(_snake_case , _snake_case )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _a ( self : Optional[Any] ):
"""simple docstring"""
import torch
def gen_random_output():
A__ = torch.nn.Linear(3 , 2 )
A__ = torch.rand(1 , 3 )
return model(_snake_case ).detach().numpy()
with temp_seed(42 , set_pytorch=_snake_case ):
A__ = gen_random_output()
with temp_seed(42 , set_pytorch=_snake_case ):
A__ = gen_random_output()
A__ = gen_random_output()
np.testing.assert_equal(_snake_case , _snake_case )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _a ( self : List[Any] ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A__ = gen_random_output()
with temp_seed(42 ):
A__ = gen_random_output()
A__ = gen_random_output()
np.testing.assert_equal(_snake_case , _snake_case )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def A ( __UpperCamelCase ) -> List[str]:
A__ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def A ( __UpperCamelCase , __UpperCamelCase ) -> str:
A__ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def A ( ) -> Tuple:
A__ = A(x=1 , y='foobar' )
A__ = {'x': 1, 'y': 'foobar'}
assert asdict(__UpperCamelCase ) == expected_output
A__ = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
A__ = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 , y='foo' )] )
def A ( __UpperCamelCase ) -> str:
return text.split()
def A ( __UpperCamelCase ) -> Tuple:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def A ( ) -> Any:
with Pool(2 ) as pool:
A__ = list(iflatmap_unordered(__UpperCamelCase , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A__ = list(iflatmap_unordered(__UpperCamelCase , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A__ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count('a' ) == 2
assert out.count('b' ) == 2
assert len(__UpperCamelCase ) == 4
| 9
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = dataset
_A = process
_A = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : str , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = self.dataset[i]
_A = self.process(__UpperCAmelCase , **self.params )
return processed
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Tuple=None ):
'''simple docstring'''
_A = loader
_A = infer
_A = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_A = None
_A = loader_batch_size
# Internal bookkeeping
_A = None
_A = None
def __len__( self : Dict ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[Any] ):
'''simple docstring'''
_A = iter(self.loader )
return self
def lowerCAmelCase ( self : str ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_A = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_A = {}
for k, element in self._loader_batch_data.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
# Convert ModelOutput to tuple first
_A = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__UpperCAmelCase , __UpperCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_A = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_A = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_A = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_A = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_A = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_A = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_A = self._loader_batch_data.__class__(__UpperCAmelCase )
self._loader_batch_index += 1
return result
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_A = next(self.iterator )
_A = self.infer(__UpperCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__UpperCAmelCase , torch.Tensor ):
_A = processed
else:
_A = list(processed.keys() )[0]
_A = processed[key]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = len(__UpperCAmelCase )
else:
_A = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_A = observed_batch_size
# Setting internal index to unwrap the batch
_A = processed
_A = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any]=None ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __iter__( self : List[str] ):
'''simple docstring'''
_A = iter(self.loader )
_A = None
return self
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
if self.subiterator is None:
_A = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_A = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_A = self.infer(next(self.iterator ) , **self.params )
_A = next(self.subiterator )
return processed
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
_A = iter(self.loader )
return self
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = False
_A = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_A = self.loader_batch_item()
_A = item.pop("is_last" )
accumulator.append(__UpperCAmelCase )
if is_last:
return accumulator
while not is_last:
_A = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__UpperCAmelCase , torch.Tensor ):
_A = processed
else:
_A = list(processed.keys() )[0]
_A = processed[key]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = len(__UpperCAmelCase )
else:
_A = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_A = observed_batch_size
_A = processed
_A = 0
while self._loader_batch_index < self.loader_batch_size:
_A = self.loader_batch_item()
_A = item.pop("is_last" )
accumulator.append(__UpperCAmelCase )
if is_last:
return accumulator
else:
_A = processed
_A = item.pop("is_last" )
accumulator.append(__UpperCAmelCase )
return accumulator
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : str , __UpperCAmelCase : Dataset , __UpperCAmelCase : str ):
'''simple docstring'''
_A = dataset
_A = key
def __len__( self : str ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Tuple , __UpperCAmelCase : str ):
'''simple docstring'''
return self.dataset[i][self.key]
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : Dataset , __UpperCAmelCase : str , __UpperCAmelCase : str ):
'''simple docstring'''
_A = dataset
_A = keya
_A = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : int , __UpperCAmelCase : Tuple ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 330
| 0
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ : Union[str, Any] = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase( __lowerCamelCase ):
if isinstance(__lowerCamelCase , torch.Tensor ):
return image
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
__a = [image]
__a = [trans(img.convert('RGB' ) ) for img in image]
__a = torch.stack(__lowerCamelCase )
return image
class a__ ( __snake_case ):
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> str:
super().__init__()
# make sure scheduler can always be converted to DDIM
__a = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , UpperCAmelCase )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]:
if not isinstance(UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase )}''' )
__a = image.to(device=UpperCAmelCase , dtype=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__a = init_latents.shape
__a = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase )
# get latents
print('add noise to latents at timestep' , UpperCAmelCase )
__a = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = init_latents
return latents
@torch.no_grad()
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = 0.8 , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = 0.0 , UpperCAmelCase = 5_0 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(UpperCAmelCase )
# 2. Preprocess image
__a = preprocess(UpperCAmelCase )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
__a , __a = self.get_timesteps(UpperCAmelCase , UpperCAmelCase , self.device )
__a = timesteps[:1].repeat(UpperCAmelCase )
# 4. Prepare latent variables
__a = self.prepare_latents(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.unet.dtype , self.device , UpperCAmelCase )
__a = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCAmelCase ):
# 1. predict noise model_output
__a = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__a = self.scheduler.step(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , eta=UpperCAmelCase , use_clipped_model_output=UpperCAmelCase , generator=UpperCAmelCase , ).prev_sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCAmelCase )
| 709
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase_ : Dict = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""GLPNFeatureExtractor"""]
lowerCamelCase_ : Any = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 246
| 0
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
snake_case : Optional[int] = get_logger(__name__)
snake_case : Union[str, Any] = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _snake_case :
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case :
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case ( snake_case ):
@add_start_docstrings(_a )
def __call__( self , _a , _a , _a , **_a ):
for processor in self:
__magic_name__ : str = inspect.signature(processor.__call__ ).parameters
if len(_a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
__magic_name__ : Tuple = processor(_a , _a , _a , **_a )
else:
__magic_name__ : int = processor(_a , _a , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
if not isinstance(_a , _a ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__magic_name__ : str = temperature
def __call__( self , _a , _a , _a ):
__magic_name__ : List[Any] = scores / self.temperature
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a = -float("Inf" ) , _a = 1 ):
if not isinstance(_a , _a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_a , _a ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__magic_name__ : List[str] = top_p
__magic_name__ : Optional[Any] = filter_value
__magic_name__ : Tuple = min_tokens_to_keep
def __call__( self , _a , _a , _a ):
__magic_name__ , __magic_name__ : str = lax.top_k(_a , scores.shape[-1] )
__magic_name__ : Dict = jnp.full_like(_a , self.filter_value )
__magic_name__ : Union[str, Any] = jax.nn.softmax(_a , axis=-1 ).cumsum(axis=-1 )
__magic_name__ : int = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__magic_name__ : Tuple = jnp.roll(_a , 1 )
score_mask |= score_mask.at[:, 0].set(_a )
# min tokens to keep
__magic_name__ : int = score_mask.at[:, : self.min_tokens_to_keep].set(_a )
__magic_name__ : Any = jnp.where(_a , _a , _a )
__magic_name__ : Union[str, Any] = jax.lax.sort_key_val(_a , _a )[-1]
return next_scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a = -float("Inf" ) , _a = 1 ):
if not isinstance(_a , _a ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__magic_name__ : str = max(_a , _a )
__magic_name__ : Any = filter_value
def __call__( self , _a , _a , _a ):
__magic_name__ , __magic_name__ : Optional[Any] = scores.shape
__magic_name__ : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value )
__magic_name__ : Tuple = min(self.top_k , scores.shape[-1] ) # Safety check
__magic_name__ , __magic_name__ : str = lax.top_k(_a , _a )
__magic_name__ : Optional[int] = jnp.broadcast_to((jnp.arange(_a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__magic_name__ : int = topk_scores.flatten()
__magic_name__ : List[Any] = topk_indices.flatten() + shift
__magic_name__ : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(_a )
__magic_name__ : List[Any] = next_scores_flat.reshape(_a , _a )
return next_scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : Dict = bos_token_id
def __call__( self , _a , _a , _a ):
__magic_name__ : Optional[int] = jnp.full(scores.shape , -float("inf" ) )
__magic_name__ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
__magic_name__ : Dict = jnp.where(_a , new_scores.at[:, self.bos_token_id].set(0 ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
__magic_name__ : Tuple = max_length
__magic_name__ : int = eos_token_id
def __call__( self , _a , _a , _a ):
__magic_name__ : Dict = jnp.full(scores.shape , -float("inf" ) )
__magic_name__ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__magic_name__ : Dict = jnp.where(_a , new_scores.at[:, self.eos_token_id].set(0 ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
if not isinstance(_a , _a ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_a , _a ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__magic_name__ : int = min_length
__magic_name__ : Any = eos_token_id
def __call__( self , _a , _a , _a ):
# create boolean flag to decide if min length penalty should be applied
__magic_name__ : Optional[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__magic_name__ : List[Any] = jnp.where(_a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
__magic_name__ : Dict = list(_a )
__magic_name__ : Union[str, Any] = begin_index
def __call__( self , _a , _a , _a ):
__magic_name__ : Any = 1 - jnp.bool_(cur_len - self.begin_index )
__magic_name__ : Dict = jnp.where(_a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : List[Any] = list(_a )
def __call__( self , _a , _a , _a ):
__magic_name__ : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : Tuple = dict(_a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__magic_name__ : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__magic_name__ : Any = force_token_array.at[index].set(_a )
__magic_name__ : List[Any] = jnp.intaa(_a )
def __call__( self , _a , _a , _a ):
def _force_token(_a ):
__magic_name__ : Any = scores.shape[0]
__magic_name__ : int = self.force_token_array[generation_idx]
__magic_name__ : Union[str, Any] = jnp.ones_like(_a , dtype=scores.dtype ) * -float("inf" )
__magic_name__ : str = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__magic_name__ : Any = lax.dynamic_update_slice(_a , _a , (0, current_token) )
return new_scores
__magic_name__ : Any = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_a ) , lambda: scores , ) , )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a , _a ):
__magic_name__ : Optional[int] = generate_config.eos_token_id
__magic_name__ : List[Any] = generate_config.no_timestamps_token_id
__magic_name__ : Optional[Any] = generate_config.no_timestamps_token_id + 1
__magic_name__ : int = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a , "max_initial_timestamp_index" ):
__magic_name__ : List[Any] = generate_config.max_initial_timestamp_index
else:
__magic_name__ : str = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__magic_name__ : str = model_config.vocab_size
def __call__( self , _a , _a , _a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__magic_name__ : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_a , _a ):
__magic_name__ : int = jnp.where((cur_len - self.begin_index) >= 1 , _a , _a )
__magic_name__ : str = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _a , )
__magic_name__ : Dict = jnp.where((cur_len - self.begin_index) < 2 , _a , _a )
__magic_name__ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _a , _a , )
return jnp.where(
_a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _a , )
__magic_name__ : Any = jax.vmap(_a )(_a , _a )
__magic_name__ : Dict = jnp.where(cur_len == self.begin_index , _a , _a )
__magic_name__ : List[str] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _a , )
__magic_name__ : Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index
__magic_name__ : Tuple = jnp.where(
_a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__magic_name__ : Union[str, Any] = jax.nn.log_softmax(_a , axis=-1 )
def handle_cumulative_probs(_a , _a ):
__magic_name__ : int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__magic_name__ : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _a , )
__magic_name__ : int = jax.vmap(_a )(_a , _a )
return scores
| 124
|
from typing import Dict
from .base import GenericTensor, Pipeline
class _snake_case ( snake_case ):
def SCREAMING_SNAKE_CASE ( self , _a=None , _a=None , _a=None , **_a ):
if tokenize_kwargs is None:
__magic_name__ : Tuple = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
__magic_name__ : Dict = truncation
__magic_name__ : str = tokenize_kwargs
__magic_name__ : Any = {}
if return_tensors is not None:
__magic_name__ : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self , _a , **_a ):
__magic_name__ : List[Any] = self.framework
__magic_name__ : Union[str, Any] = self.tokenizer(_a , return_tensors=_a , **_a )
return model_inputs
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : List[Any] = self.model(**_a )
return model_outputs
def SCREAMING_SNAKE_CASE ( self , _a , _a=False ):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_a , **_a ):
return super().__call__(*_a , **_a )
| 124
| 1
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
A_ = logging.get_logger(__name__)
class UpperCamelCase__ ( a ):
'''simple docstring'''
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Tuple:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(SCREAMING_SNAKE_CASE ) )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = [sequences]
__lowerCAmelCase : str = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(SCREAMING_SNAKE_CASE )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(a )
class UpperCamelCase__ ( a ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE=ZeroShotClassificationArgumentHandler() , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase : List[Any] = args_parser
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def snake_case ( self ) -> Union[str, Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=TruncationStrategy.ONLY_FIRST , **SCREAMING_SNAKE_CASE ) -> Dict:
__lowerCAmelCase : Optional[int] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
__lowerCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
__lowerCAmelCase : Dict = self.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , )
except Exception as e:
if "too short" in str(SCREAMING_SNAKE_CASE ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__lowerCAmelCase : Optional[int] = self.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def snake_case ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
if kwargs.get('multi_class' , SCREAMING_SNAKE_CASE ) is not None:
__lowerCAmelCase : str = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
__lowerCAmelCase : Tuple = {}
if "candidate_labels" in kwargs:
__lowerCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
__lowerCAmelCase : int = kwargs['hypothesis_template']
__lowerCAmelCase : int = {}
if "multi_label" in kwargs:
__lowerCAmelCase : Any = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[Any]:
if len(SCREAMING_SNAKE_CASE ) == 0:
pass
elif len(SCREAMING_SNAKE_CASE ) == 1 and "candidate_labels" not in kwargs:
__lowerCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="This example is {}." ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase : Tuple = self._args_parser(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i, (candidate_label, sequence_pair) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase : Dict = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(SCREAMING_SNAKE_CASE ) - 1,
**model_input,
}
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
__lowerCAmelCase : List[str] = inputs['candidate_label']
__lowerCAmelCase : Optional[int] = inputs['sequence']
__lowerCAmelCase : Tuple = {k: inputs[k] for k in self.tokenizer.model_input_names}
__lowerCAmelCase : int = self.model(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
__lowerCAmelCase : Optional[Any] = [outputs['candidate_label'] for outputs in model_outputs]
__lowerCAmelCase : Any = [outputs['sequence'] for outputs in model_outputs]
__lowerCAmelCase : Any = np.concatenate([output['logits'].numpy() for output in model_outputs] )
__lowerCAmelCase : Optional[Any] = logits.shape[0]
__lowerCAmelCase : int = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = N // n
__lowerCAmelCase : Optional[Any] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(SCREAMING_SNAKE_CASE ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__lowerCAmelCase : List[str] = self.entailment_id
__lowerCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
__lowerCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
__lowerCAmelCase : int = np.exp(SCREAMING_SNAKE_CASE ) / np.exp(SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__lowerCAmelCase : Optional[int] = reshaped_outputs[..., self.entailment_id]
__lowerCAmelCase : str = np.exp(SCREAMING_SNAKE_CASE ) / np.exp(SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 123
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( a , unittest.TestCase ):
'''simple docstring'''
_snake_case = OpenAIGPTTokenizer
_snake_case = OpenAIGPTTokenizerFast
_snake_case = True
_snake_case = False
def snake_case ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__lowerCAmelCase : Tuple = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Any:
return "lower newer", "lower newer"
def snake_case ( self ) -> List[str]:
__lowerCAmelCase : List[str] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__lowerCAmelCase : List[str] = 'lower'
__lowerCAmelCase : Union[str, Any] = ['low', 'er</w>']
__lowerCAmelCase : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = tokens + ['<unk>']
__lowerCAmelCase : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self , SCREAMING_SNAKE_CASE=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
__lowerCAmelCase : Optional[Any] = 'This is a simple input'
__lowerCAmelCase : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
__lowerCAmelCase : int = ('This is a simple input', 'This is a pair')
__lowerCAmelCase : Optional[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding='max_length' , )
def snake_case ( self ) -> int:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCamelCase__ ( a ):
'''simple docstring'''
pass
| 123
| 1
|
# Function to print upper half of diamond (pyramid)
def __lowerCAmelCase ( a__ ) -> List[str]:
for i in range(0 , a__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def __lowerCAmelCase ( a__ ) -> Any:
for i in range(a__ , 0 , -1 ):
for _ in range(a__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def __lowerCAmelCase ( a__ ) -> Tuple:
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(a__ ) # upper half
reverse_floyd(a__ ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
A : Dict = 1
while K:
A : Any = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
A : Optional[int] = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 219
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A : Any = logging.get_logger(__name__)
class __A:
def __init__( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
__a = question_encoder
__a = generator
__a = self.question_encoder
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
if os.path.isfile(_snake_case ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_snake_case , exist_ok=_snake_case )
__a = os.path.join(_snake_case , '''question_encoder_tokenizer''' )
__a = os.path.join(_snake_case , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(_snake_case )
self.generator.save_pretrained(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
__a = kwargs.pop('''config''' , _snake_case )
if config is None:
__a = RagConfig.from_pretrained(_snake_case )
__a = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
__a = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=_snake_case , generator=_snake_case )
def __call__( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
return self.current_tokenizer(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
return self.generator.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
return self.generator.decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.question_encoder
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.generator
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = "longest" , _snake_case = None , _snake_case = True , **_snake_case , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
__a = self.current_tokenizer.model_max_length
__a = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__a = self.current_tokenizer.model_max_length
__a = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
__a = labels['''input_ids''']
return model_inputs
| 219
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
class snake_case__ ( UpperCamelCase):
a_ = ["pixel_values"]
def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 2_55 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : List[str] , ) -> None:
super().__init__(**_A )
UpperCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 2_24}
UpperCAmelCase_ : str = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
UpperCAmelCase_ : int = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : Union[str, Any] = size
UpperCAmelCase_ : int = resample
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Union[str, Any] = do_rescale
UpperCAmelCase_ : List[str] = rescale_factor
UpperCAmelCase_ : List[Any] = do_normalize
UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A ( self : Tuple , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
UpperCAmelCase_ : Union[str, Any] = get_size_dict(_A , default_to_square=_A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase_ : int = int((2_56 / 2_24) * size['''shortest_edge'''] )
UpperCAmelCase_ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
UpperCAmelCase_ : Any = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
_A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )
def A ( self : List[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def A ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] , ) -> np.ndarray:
return rescale(_A , scale=_A , data_format=_A , **_A )
def A ( self : Optional[int] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def A ( self : Optional[int] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Optional[int] , ) -> BatchFeature:
UpperCAmelCase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Optional[int] = size if size is not None else self.size
UpperCAmelCase_ : Dict = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : str = get_size_dict(_A , param_name='''crop_size''' )
UpperCAmelCase_ : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase_ : Dict = [self.resize(_A , _A , _A ) for image in images]
if do_center_crop:
UpperCAmelCase_ : List[Any] = [self.center_crop(_A , _A ) for image in images]
if do_rescale:
UpperCAmelCase_ : Tuple = [self.rescale(_A , _A ) for image in images]
if do_normalize:
UpperCAmelCase_ : str = [self.normalize(_A , _A , _A ) for image in images]
UpperCAmelCase_ : Optional[int] = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
| 216
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = KandinskyImgaImgPipeline
a_ = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a_ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a_ = False
@property
def A ( self : Optional[Any] ) -> Tuple:
return 32
@property
def A ( self : Tuple ) -> Tuple:
return 32
@property
def A ( self : str ) -> List[str]:
return self.time_input_dim
@property
def A ( self : List[str] ) -> int:
return self.time_input_dim * 4
@property
def A ( self : int ) -> str:
return 1_00
@property
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def A ( self : Optional[Any] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
UpperCAmelCase_ : str = MultilingualCLIP(_A )
UpperCAmelCase_ : Tuple = text_encoder.eval()
return text_encoder
@property
def A ( self : int ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase_ : Tuple = UNetaDConditionModel(**_A )
return model
@property
def A ( self : List[str] ) -> Tuple:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : str ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Any ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : Optional[int] = self.dummy_tokenizer
UpperCAmelCase_ : Optional[int] = self.dummy_unet
UpperCAmelCase_ : Optional[Any] = self.dummy_movq
UpperCAmelCase_ : Optional[int] = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase_ : Tuple = DDIMScheduler(**_A )
UpperCAmelCase_ : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self : str , _A : Optional[int] , _A : Union[str, Any]=0 ) -> str:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
UpperCAmelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Union[str, Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def A ( self : Dict ) -> int:
UpperCAmelCase_ : str = '''cpu'''
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
UpperCAmelCase_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : List[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase):
def A ( self : Tuple ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
UpperCAmelCase_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase_ : Tuple = '''A red cartoon frog, 4k'''
UpperCAmelCase_ : Dict = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase_ : Any = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
UpperCAmelCase_ : Optional[int] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ : str = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase_ : Any = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
UpperCAmelCase_ : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_A , _A )
| 216
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : str = logging.get_logger(__name__)
_snake_case : str = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class a (lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = """ibert"""
def __init__( self : str , lowerCamelCase : Optional[Any]=30522 , lowerCamelCase : List[Any]=768 , lowerCamelCase : str=12 , lowerCamelCase : Optional[Any]=12 , lowerCamelCase : Tuple=3072 , lowerCamelCase : Dict="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Dict=0.1 , lowerCamelCase : str=512 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : List[Any]=1E-12 , lowerCamelCase : Any=1 , lowerCamelCase : Optional[int]=0 , lowerCamelCase : int=2 , lowerCamelCase : Any="absolute" , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : Optional[int]="none" , **lowerCamelCase : Dict , ) -> List[str]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
__snake_case : Dict = vocab_size
__snake_case : int = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Tuple = hidden_act
__snake_case : str = intermediate_size
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : Optional[Any] = max_position_embeddings
__snake_case : Tuple = type_vocab_size
__snake_case : List[str] = initializer_range
__snake_case : Optional[int] = layer_norm_eps
__snake_case : Optional[int] = position_embedding_type
__snake_case : Any = quant_mode
__snake_case : Dict = force_dequant
class a (lowerCamelCase_ ):
"""simple docstring"""
@property
def __snake_case ( self : List[str] ) -> Union[str, Any]:
if self.task == "multiple-choice":
__snake_case : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 81
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_lowercase: Dict = cst_fwd.get(__magic_name__ , np.inf )
_lowercase: Any = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_lowercase: Optional[Any] = new_cost_f
_lowercase: Union[str, Any] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_lowercase: List[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
_lowercase: List[Any] = -1
_lowercase: Union[str, Any] = set()
_lowercase: Any = set()
_lowercase: Tuple = {source: 0}
_lowercase: Tuple = {destination: 0}
_lowercase: Optional[Any] = {source: None}
_lowercase: Any = {destination: None}
_lowercase: PriorityQueue[Any] = PriorityQueue()
_lowercase: PriorityQueue[Any] = PriorityQueue()
_lowercase: Optional[int] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_lowercase , _lowercase: Optional[Any] = queue_forward.get()
visited_forward.add(__magic_name__ )
_lowercase , _lowercase: List[str] = queue_backward.get()
visited_backward.add(__magic_name__ )
_lowercase: Optional[int] = pass_and_relaxation(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
_lowercase: List[str] = pass_and_relaxation(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_lowercase: Optional[Any] = shortest_distance
return shortest_path_distance
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
_SCREAMING_SNAKE_CASE : Tuple = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 226
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class A__ ( _UpperCAmelCase ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase__ , **lowercase__ )
__magic_name__ : Union[str, Any] = {}
def lowercase ( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ : Optional[int] = super().add_tokens(lowercase__ , *lowercase__ , **lowercase__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
''' `placeholder_token` that is not already in the tokenizer.''' )
def lowercase ( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=1 , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
__magic_name__ : Optional[int] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
else:
__magic_name__ : Dict = []
for i in range(lowercase__ ):
__magic_name__ : Union[str, Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(lowercase__ , *lowercase__ , **lowercase__ )
output.append(lowercase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}keep placeholder tokens independent''' )
__magic_name__ : str = output
def lowercase ( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 ) -> List[Any]:
"""simple docstring"""
if isinstance(lowercase__ , lowercase__ ):
__magic_name__ : Dict = []
for i in range(len(lowercase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowercase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__magic_name__ : str = self.token_map[placeholder_token]
__magic_name__ : str = tokens[: 1 + int(len(lowercase__ ) * prop_tokens_to_load )]
if vector_shuffle:
__magic_name__ : Optional[Any] = copy.copy(lowercase__ )
random.shuffle(lowercase__ )
__magic_name__ : Optional[int] = text.replace(lowercase__ , ''' '''.join(lowercase__ ) )
return text
def __call__( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
def lowercase ( self , lowerCamelCase , *lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1.0 , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
lowercase__ , vector_shuffle=lowercase__ , prop_tokens_to_load=lowercase__ ) , *lowercase__ , **lowercase__ , )
| 704
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : torch.FloatTensor
lowerCamelCase__ : Optional[torch.FloatTensor] =None
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase=0.9_99, UpperCAmelCase="cosine", ) ->Optional[Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(UpperCAmelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(UpperCAmelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__magic_name__ : List[Any] = []
for i in range(UpperCAmelCase ):
__magic_name__ : Tuple = i / num_diffusion_timesteps
__magic_name__ : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCAmelCase ) / alpha_bar_fn(UpperCAmelCase ), UpperCAmelCase ) )
return torch.tensor(UpperCAmelCase, dtype=torch.floataa )
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Union[str, Any] =1
@register_to_config
def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = 0.0_0_0_1 , lowerCamelCase = 0.0_2 , lowerCamelCase = "linear" , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 0 , lowerCamelCase = "epsilon" , lowerCamelCase = 1.0 , **lowerCamelCase , ) -> Optional[Any]:
"""simple docstring"""
if kwargs.get('''set_alpha_to_one''' , lowerCamelCase ) is not None:
__magic_name__ : Any = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCamelCase , standard_warn=lowerCamelCase )
__magic_name__ : Tuple = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__magic_name__ : Any = torch.tensor(lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__magic_name__ : Union[str, Any] = torch.linspace(lowerCamelCase , lowerCamelCase , lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__magic_name__ : str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__magic_name__ : List[str] = betas_for_alpha_bar(lowerCamelCase )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
__magic_name__ : Dict = 1.0 - self.betas
__magic_name__ : List[str] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__magic_name__ : str = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__magic_name__ : int = 1.0
# setable values
__magic_name__ : List[str] = None
__magic_name__ : Dict = torch.from_numpy(np.arange(0 , lowerCamelCase ).copy().astype(np.intaa ) )
def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple:
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
__magic_name__ : int = num_inference_steps
__magic_name__ : Tuple = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__magic_name__ : Any = (np.arange(0 , lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
__magic_name__ : Optional[int] = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase )
self.timesteps += self.config.steps_offset
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
__magic_name__ : Dict = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__magic_name__ : List[Any] = self.alphas_cumprod[timestep]
__magic_name__ : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__magic_name__ : Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__magic_name__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__magic_name__ : Union[str, Any] = model_output
elif self.config.prediction_type == "sample":
__magic_name__ : Dict = model_output
__magic_name__ : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__magic_name__ : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__magic_name__ : Optional[int] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__magic_name__ : Any = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__magic_name__ : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__magic_name__ : Optional[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase )
def __len__( self ) -> int:
"""simple docstring"""
return self.config.num_train_timesteps
| 336
| 0
|
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = tuple[int, int, int]
SCREAMING_SNAKE_CASE_ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
SCREAMING_SNAKE_CASE_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
SCREAMING_SNAKE_CASE_ = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
SCREAMING_SNAKE_CASE_ = 'FOBHMDKEXQNRAULPGSJVTYICZW'
SCREAMING_SNAKE_CASE_ = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
SCREAMING_SNAKE_CASE_ = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
SCREAMING_SNAKE_CASE_ = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
SCREAMING_SNAKE_CASE_ = 'SGLCPQWZHKXAREONTFBVIYJUDM'
SCREAMING_SNAKE_CASE_ = 'HVSICLTYKQUBXDWAJZOMFGPREN'
SCREAMING_SNAKE_CASE_ = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
SCREAMING_SNAKE_CASE_ = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
SCREAMING_SNAKE_CASE_ = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def UpperCamelCase__ ( _lowercase : RotorPositionT , _lowercase : RotorSelectionT , _lowercase : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_lowercase ) )) < 3:
__UpperCAmelCase: int = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(_lowercase )
# Checks if rotor positions are valid
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = rotpos
if not 0 < rotorposa <= len(_lowercase ):
__UpperCAmelCase: int = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
__UpperCAmelCase: Tuple = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowercase )
if not 0 < rotorposa <= len(_lowercase ):
__UpperCAmelCase: str = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_lowercase )
# Validates string and returns dict
__UpperCAmelCase: Any = _plugboard(_lowercase )
return rotpos, rotsel, pbdict
def UpperCamelCase__ ( _lowercase : str ) -> dict[str, str]:
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(_lowercase , _lowercase ):
__UpperCAmelCase: Any = F'''Plugboard setting isn\'t type string ({type(_lowercase )})'''
raise TypeError(_lowercase )
elif len(_lowercase ) % 2 != 0:
__UpperCAmelCase: Union[str, Any] = F'''Odd number of symbols ({len(_lowercase )})'''
raise Exception(_lowercase )
elif pbstring == "":
return {}
pbstring.replace(""" """ , """""" )
# Checks if all characters are unique
__UpperCAmelCase: Tuple = set()
for i in pbstring:
if i not in abc:
__UpperCAmelCase: Optional[Any] = F'''\'{i}\' not in list of symbols'''
raise Exception(_lowercase )
elif i in tmppbl:
__UpperCAmelCase: Any = F'''Duplicate symbol ({i})'''
raise Exception(_lowercase )
else:
tmppbl.add(_lowercase )
del tmppbl
# Created the dictionary
__UpperCAmelCase: Any = {}
for j in range(0 , len(_lowercase ) - 1 , 2 ):
__UpperCAmelCase: int = pbstring[j + 1]
__UpperCAmelCase: str = pbstring[j]
return pb
def UpperCamelCase__ ( _lowercase : str , _lowercase : RotorPositionT , _lowercase : RotorSelectionT = (rotora, rotora, rotora) , _lowercase : str = "" , ) -> str:
__UpperCAmelCase: List[Any] = text.upper()
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Dict = _validator(
_lowercase , _lowercase , plugb.upper() )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = rotor_position
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[str] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
__UpperCAmelCase: Any = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
__UpperCAmelCase: Optional[int] = plugboard[symbol]
# rotor ra --------------------------
__UpperCAmelCase: int = abc.index(_lowercase ) + rotorposa
__UpperCAmelCase: Any = rotora[index % len(_lowercase )]
# rotor rb --------------------------
__UpperCAmelCase: List[str] = abc.index(_lowercase ) + rotorposa
__UpperCAmelCase: int = rotora[index % len(_lowercase )]
# rotor rc --------------------------
__UpperCAmelCase: Dict = abc.index(_lowercase ) + rotorposa
__UpperCAmelCase: Union[str, Any] = rotora[index % len(_lowercase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
__UpperCAmelCase: Dict = reflector[symbol]
# 2nd rotors
__UpperCAmelCase: int = abc[rotora.index(_lowercase ) - rotorposa]
__UpperCAmelCase: Tuple = abc[rotora.index(_lowercase ) - rotorposa]
__UpperCAmelCase: Any = abc[rotora.index(_lowercase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
__UpperCAmelCase: Optional[int] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_lowercase ):
__UpperCAmelCase: Union[str, Any] = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
__UpperCAmelCase: int = 0
rotorposa += 1
if rotorposa >= len(_lowercase ):
__UpperCAmelCase: str = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_lowercase )
return "".join(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 'This is my Python script that emulates the Enigma machine from WWII.'
SCREAMING_SNAKE_CASE_ = (1, 1, 1)
SCREAMING_SNAKE_CASE_ = 'pictures'
SCREAMING_SNAKE_CASE_ = (rotora, rotora, rotora)
SCREAMING_SNAKE_CASE_ = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 523
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spm_char.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'microsoft/speecht5_asr': 10_24,
'microsoft/speecht5_tts': 10_24,
'microsoft/speecht5_vc': 10_24,
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_ = None , **snake_case_ , ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
__UpperCAmelCase: Union[str, Any] = vocab_file
__UpperCAmelCase: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Tuple = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = self.__dict__.copy()
__UpperCAmelCase: List[Any] = None
return state
def __setstate__( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__UpperCAmelCase: List[Any] = {}
__UpperCAmelCase: Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
return self.sp_model.piece_to_id(snake_case_ )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = self.sp_model.IdToPiece(snake_case_ )
return token
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Any = []
__UpperCAmelCase: Optional[Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
__UpperCAmelCase: Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def lowercase_ ( self , snake_case_ , snake_case_=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
__UpperCAmelCase: Union[str, Any] = [1]
if token_ids_a is None:
return ([0] * len(snake_case_ )) + suffix_ones
return ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def lowercase_ ( self , snake_case_ , snake_case_ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase: Union[str, Any] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , """wb""" ) as fi:
__UpperCAmelCase: Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 523
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase = tempfile.mkdtemp()
lowercase = BlipImageProcessor()
lowercase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
lowercase = BlipProcessor(a , a )
processor.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self : Any , **a : Union[str, Any] ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
def _lowerCAmelCase ( self : Tuple , **a : Any ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase = self.get_image_processor(do_normalize=a , padding_value=1.0 )
lowercase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(a , return_tensors='''np''' )
lowercase = processor(images=a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = '''lower newer'''
lowercase = processor(text=a )
lowercase = tokenizer(a , return_token_type_ids=a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = '''lower newer'''
lowercase = self.prepare_image_inputs()
lowercase = processor(text=a , images=a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(a )
lowercase = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=a , image_processor=a )
lowercase = '''lower newer'''
lowercase = self.prepare_image_inputs()
lowercase = processor(text=a , images=a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 396
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A_ ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' )
lowercase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase = transform(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
return image
def A_ ( __UpperCamelCase : str ):
if "visual_encoder" in key:
lowercase = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , __UpperCamelCase )
if "blocks" in key:
lowercase = re.sub(R'''blocks''' , '''layers''' , __UpperCamelCase )
if "attn" in key:
lowercase = re.sub(R'''attn''' , '''self_attn''' , __UpperCamelCase )
if "norm1" in key:
lowercase = re.sub(R'''norm1''' , '''layer_norm1''' , __UpperCamelCase )
if "norm2" in key:
lowercase = re.sub(R'''norm2''' , '''layer_norm2''' , __UpperCamelCase )
if "encoder.norm" in key:
lowercase = re.sub(R'''encoder.norm''' , '''post_layernorm''' , __UpperCamelCase )
if "encoder.patch_embed.proj" in key:
lowercase = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , __UpperCamelCase )
if "encoder.pos_embed" in key:
lowercase = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , __UpperCamelCase )
if "encoder.cls_token" in key:
lowercase = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , __UpperCamelCase )
if "self_attn" in key:
lowercase = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , __UpperCamelCase )
return key
@torch.no_grad()
def A_ ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]=None ):
if config_path is not None:
lowercase = BlipConfig.from_pretrained(__UpperCamelCase )
else:
lowercase = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
lowercase = BlipForConditionalGeneration(__UpperCamelCase ).eval()
lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase = blip_decoder(pretrained=__UpperCamelCase , image_size=3_84 , vit='''base''' )
lowercase = pt_model.eval()
lowercase = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__UpperCamelCase )
lowercase = rename_key(__UpperCamelCase )
lowercase = value
hf_model.load_state_dict(__UpperCamelCase )
lowercase = 3_84
lowercase = load_demo_image(image_size=__UpperCamelCase , device='''cpu''' )
lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase = tokenizer(['''a picture of'''] ).input_ids
lowercase = hf_model.generate(__UpperCamelCase , __UpperCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
lowercase = hf_model.generate(__UpperCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__UpperCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase = blip_vqa(pretrained=__UpperCamelCase , image_size=__UpperCamelCase , vit='''base''' )
vqa_model.eval()
lowercase = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__UpperCamelCase )
lowercase = rename_key(__UpperCamelCase )
lowercase = value
lowercase = BlipForQuestionAnswering(__UpperCamelCase )
hf_vqa_model.load_state_dict(__UpperCamelCase )
lowercase = ['''How many dogs are in this image?''']
lowercase = tokenizer(__UpperCamelCase , return_tensors='''pt''' ).input_ids
lowercase = hf_vqa_model.generate(__UpperCamelCase , __UpperCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase = blip_itm(pretrained=__UpperCamelCase , image_size=__UpperCamelCase , vit='''base''' )
itm_model.eval()
lowercase = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__UpperCamelCase )
lowercase = rename_key(__UpperCamelCase )
lowercase = value
lowercase = BlipForImageTextRetrieval(__UpperCamelCase )
lowercase = ['''A picture of a woman with a dog sitting in a beach''']
lowercase = tokenizer(
__UpperCamelCase , return_tensors='''pt''' , padding='''max_length''' , truncation=__UpperCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__UpperCamelCase )
hf_itm_model.eval()
lowercase = hf_itm_model(__UpperCamelCase , __UpperCamelCase , use_itm_head=__UpperCamelCase )
lowercase = hf_itm_model(__UpperCamelCase , __UpperCamelCase , use_itm_head=__UpperCamelCase )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__lowerCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 396
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ : Optional[Any] = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Dict:
'''simple docstring'''
if attention_mask is None:
lowercase__ : Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowercase__ : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowercase__ : str = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase__ : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase__ : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0.0_2 , ):
'''simple docstring'''
lowercase__ : List[str] = parent
lowercase__ : Tuple = batch_size
lowercase__ : Union[str, Any] = seq_length
lowercase__ : Tuple = is_training
lowercase__ : Any = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : str = eos_token_id
lowercase__ : Tuple = pad_token_id
lowercase__ : Union[str, Any] = bos_token_id
lowercase__ : int = initializer_range
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
lowercase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
lowercase__ : Dict = shift_tokens_right(SCREAMING_SNAKE_CASE_ , 1 , 2)
lowercase__ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Tuple = prepare_blenderbot_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return config, inputs_dict
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = 20
lowercase__ : Tuple = model_class_name(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = model.encode(inputs_dict["""input_ids"""])
lowercase__ , lowercase__ : Union[str, Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase__ : str = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""")
lowercase__ : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase__ : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase__ : List[str] = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : List[str] = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : str = 20
lowercase__ : List[Any] = model_class_name(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = model.encode(inputs_dict["""input_ids"""])
lowercase__ , lowercase__ : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase__ : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
lowercase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase__ : str = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""")
lowercase__ : Any = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , )
lowercase__ : str = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}')
@require_flax
class _snake_case ( unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = 99
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowercase__ : Tuple = input_ids.shape[0]
lowercase__ : List[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ : List[str] = self._get_config_and_data()
lowercase__ : Optional[Any] = FlaxBlenderbotSmallForConditionalGeneration(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = lm_model(input_ids=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowercase__ : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa)
lowercase__ : Tuple = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa)
lowercase__ : Optional[int] = lm_model(input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa)
lowercase__ : int = shift_tokens_right(SCREAMING_SNAKE_CASE_ , 1 , 2)
lowercase__ : Optional[int] = np.equal(SCREAMING_SNAKE_CASE_ , 1).astype(np.floataa).sum()
lowercase__ : int = np.equal(SCREAMING_SNAKE_CASE_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(SCREAMING_SNAKE_CASE_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class _snake_case ( UpperCAmelCase_ , unittest.TestCase , UpperCAmelCase_ ):
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowerCAmelCase : Tuple = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = FlaxBlenderbotSmallModelTester(self)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase__ : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE_)
@jax.jit
def encode_jitted(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_):
return model.encode(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_)
with self.subTest("""JIT Enabled"""):
lowercase__ : str = encode_jitted(**SCREAMING_SNAKE_CASE_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase__ : int = encode_jitted(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_))
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(jitted_output.shape , output.shape)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""])
lowercase__ : Dict = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
return model.decode(
decoder_input_ids=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , encoder_outputs=SCREAMING_SNAKE_CASE_ , )
with self.subTest("""JIT Enabled"""):
lowercase__ : int = decode_jitted(**SCREAMING_SNAKE_CASE_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase__ : Optional[Any] = decode_jitted(**SCREAMING_SNAKE_CASE_).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_))
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ : Any = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase__ : Any = np.ones((1, 1)) * model.config.eos_token_id
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
| 12
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : int = (DDPMScheduler,)
def lowercase__ ( self , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**SCREAMING_SNAKE_CASE_)
return config
def lowercase__ ( self):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2]):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0_9_7_9)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.0_2)) < 1E-5
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
lowercase__ : str = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : List[str] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : str = pred_prev_sample
lowercase__ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""")
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter
lowercase__ : int = torch.manual_seed(0)
for t in reversed(range(SCREAMING_SNAKE_CASE_)):
# 1. predict noise residual
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# 2. predict previous mean of sample x_t-1
lowercase__ : int = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ : Tuple = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_))
lowercase__ : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_))
assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : str = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(SCREAMING_SNAKE_CASE_):
if i == len(SCREAMING_SNAKE_CASE_) - 1:
lowercase__ : Optional[int] = -1
else:
lowercase__ : Tuple = timesteps[i + 1]
lowercase__ : Any = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_)
lowercase__ : int = prev_t.item()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = [1_00, 87, 50, 1, 0]
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_)
with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`."""):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : int = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_)
| 12
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__UpperCamelCase : str = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
__magic_name__ = """albert"""
def __init__( self , UpperCAmelCase__=3_0_0_0_0 , UpperCAmelCase__=1_2_8 , UpperCAmelCase__=4_0_9_6 , UpperCAmelCase__=1_2 , UpperCAmelCase__=1 , UpperCAmelCase__=6_4 , UpperCAmelCase__=1_6_3_8_4 , UpperCAmelCase__=1 , UpperCAmelCase__="gelu_new" , UpperCAmelCase__=0 , UpperCAmelCase__=0 , UpperCAmelCase__=5_1_2 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0.1 , UpperCAmelCase__="absolute" , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__=3 , **UpperCAmelCase__ , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
_A : Optional[Any] = vocab_size
_A : Optional[int] = embedding_size
_A : str = hidden_size
_A : Union[str, Any] = num_hidden_layers
_A : Optional[int] = num_hidden_groups
_A : Optional[Any] = num_attention_heads
_A : Tuple = inner_group_num
_A : Tuple = hidden_act
_A : List[Any] = intermediate_size
_A : str = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : List[str] = max_position_embeddings
_A : List[Any] = type_vocab_size
_A : Any = initializer_range
_A : Tuple = layer_norm_eps
_A : Dict = classifier_dropout_prob
_A : Union[str, Any] = position_embedding_type
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_A : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 700
|
'''simple docstring'''
def lowercase ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[int]):
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path)
def lowercase ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : list[int] , lowerCAmelCase : int):
"""simple docstring"""
if curr_ind == len(lowerCAmelCase):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(lowerCAmelCase)):
if valid_connection(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase):
# Insert current vertex into path as next transition
_A : Tuple = next_ver
# Validate created path
if util_hamilton_cycle(lowerCAmelCase , lowerCAmelCase , curr_ind + 1):
return True
# Backtrack
_A : Optional[int] = -1
return False
def lowercase ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : int = 0):
"""simple docstring"""
_A : Optional[Any] = [-1] * (len(lowerCAmelCase) + 1)
# initialize start and end of path with starting index
_A : int = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(lowerCAmelCase , lowerCAmelCase , 1) else []
| 417
| 0
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''MCTCTFeatureExtractor'''
UpperCamelCase = '''AutoTokenizer'''
def __init__( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
def __call__( self : List[str] , *_UpperCAmelCase : int , **_UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
UpperCAmelCase_ = kwargs.pop("raw_speech" )
else:
UpperCAmelCase_ = kwargs.pop("audio" , _UpperCAmelCase )
UpperCAmelCase_ = kwargs.pop("sampling_rate" , _UpperCAmelCase )
UpperCAmelCase_ = kwargs.pop("text" , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
UpperCAmelCase_ = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None:
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase_ = encodings["input_ids"]
return inputs
def lowercase__ ( self : List[str] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = kwargs.pop("input_features" , _UpperCAmelCase )
UpperCAmelCase_ = kwargs.pop("labels" , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
UpperCAmelCase_ = args[0]
UpperCAmelCase_ = args[1:]
if input_features is not None:
UpperCAmelCase_ = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
if labels is not None:
UpperCAmelCase_ = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase_ = labels["input_ids"]
return input_features
def lowercase__ ( self : Tuple , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@contextmanager
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer
yield
UpperCAmelCase_ = self.feature_extractor
UpperCAmelCase_ = False
| 82
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = downstream_dict['projector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['projector.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.bias']
return model
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.bias']
return model
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = WavaVecaForXVector.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = downstream_dict['connector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel):
SCREAMING_SNAKE_CASE = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
SCREAMING_SNAKE_CASE = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
SCREAMING_SNAKE_CASE = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location='cpu')
SCREAMING_SNAKE_CASE = checkpoint['Downstream']
SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , do_normalize=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification'):
SCREAMING_SNAKE_CASE = convert_classification(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
elif arch.endswith('ForAudioFrameClassification'):
SCREAMING_SNAKE_CASE = convert_diarization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
elif arch.endswith('ForXVector'):
SCREAMING_SNAKE_CASE = convert_xvector(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''')
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_UpperCAmelCase)
hf_model.save_pretrained(_UpperCAmelCase)
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 710
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> str:
debug_launcher(test_script.main)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
debug_launcher(test_ops.main)
| 444
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , split=_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase , streaming=_lowerCAmelCase , num_proc=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Dict = path_or_paths if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else {self.split: path_or_paths}
_lowercase : Tuple = Text(
cache_dir=_lowerCAmelCase , data_files=_lowerCAmelCase , features=_lowerCAmelCase , **_lowerCAmelCase , )
def __a ( self ):
# Build iterable dataset
if self.streaming:
_lowercase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowercase : Any = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = None
_lowercase : str = None
self.builder.download_and_prepare(
download_config=_lowerCAmelCase , download_mode=_lowerCAmelCase , verification_mode=_lowerCAmelCase , base_path=_lowerCAmelCase , num_proc=self.num_proc , )
_lowercase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=_lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 66
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ):
__UpperCamelCase = []
__UpperCamelCase = 0
__UpperCamelCase = 0
def _lowerCamelCase ( self : Dict ):
return self.head == self.tail
def _lowerCamelCase ( self : int , __A : Any ):
self.data.append(__A )
__UpperCamelCase = self.tail + 1
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self.data[self.head]
__UpperCamelCase = self.head + 1
return ret
def _lowerCamelCase ( self : Union[str, Any] ):
return self.tail - self.head
def _lowerCamelCase ( self : Optional[int] ):
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class snake_case :
"""simple docstring"""
def __init__( self : str , __A : Any ):
__UpperCamelCase = data
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = 1
def _lowerCamelCase ( self : str ):
return self.data
def _lowerCamelCase ( self : Optional[int] ):
return self.left
def _lowerCamelCase ( self : Tuple ):
return self.right
def _lowerCamelCase ( self : str ):
return self.height
def _lowerCamelCase ( self : int , __A : Any ):
__UpperCamelCase = data
def _lowerCamelCase ( self : Optional[Any] , __A : MyNode | None ):
__UpperCamelCase = node
def _lowerCamelCase ( self : Tuple , __A : MyNode | None ):
__UpperCamelCase = node
def _lowerCamelCase ( self : List[str] , __A : int ):
__UpperCamelCase = height
def lowercase__ ( __lowercase : MyNode | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.get_height()
def lowercase__ ( __lowercase : int , __lowercase : int ) -> int:
"""simple docstring"""
if a > b:
return a
return b
def lowercase__ ( __lowercase : MyNode ) -> MyNode:
"""simple docstring"""
print('left rotation node:' , node.get_data() )
__UpperCamelCase = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__lowercase )
__UpperCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__lowercase )
__UpperCamelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__lowercase )
return ret
def lowercase__ ( __lowercase : MyNode ) -> MyNode:
"""simple docstring"""
print('right rotation node:' , node.get_data() )
__UpperCamelCase = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__lowercase )
__UpperCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__lowercase )
__UpperCamelCase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__lowercase )
return ret
def lowercase__ ( __lowercase : MyNode ) -> MyNode:
"""simple docstring"""
__UpperCamelCase = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__lowercase ) )
return right_rotation(__lowercase )
def lowercase__ ( __lowercase : MyNode ) -> MyNode:
"""simple docstring"""
__UpperCamelCase = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__lowercase ) )
return left_rotation(__lowercase )
def lowercase__ ( __lowercase : MyNode | None , __lowercase : Any ) -> MyNode | None:
"""simple docstring"""
if node is None:
return MyNode(__lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
__UpperCamelCase = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__UpperCamelCase = right_rotation(__lowercase )
else:
__UpperCamelCase = lr_rotation(__lowercase )
else:
node.set_right(insert_node(node.get_right() , __lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__UpperCamelCase = node.get_right()
assert right_child is not None
if data < right_child.get_data():
__UpperCamelCase = rl_rotation(__lowercase )
else:
__UpperCamelCase = left_rotation(__lowercase )
__UpperCamelCase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__lowercase )
return node
def lowercase__ ( __lowercase : MyNode ) -> Any:
"""simple docstring"""
while True:
__UpperCamelCase = root.get_right()
if right_child is None:
break
__UpperCamelCase = right_child
return root.get_data()
def lowercase__ ( __lowercase : MyNode ) -> Any:
"""simple docstring"""
while True:
__UpperCamelCase = root.get_left()
if left_child is None:
break
__UpperCamelCase = left_child
return root.get_data()
def lowercase__ ( __lowercase : MyNode , __lowercase : Any ) -> MyNode | None:
"""simple docstring"""
__UpperCamelCase = root.get_left()
__UpperCamelCase = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__UpperCamelCase = get_left_most(__lowercase )
root.set_data(__lowercase )
root.set_right(del_node(__lowercase , __lowercase ) )
elif left_child is not None:
__UpperCamelCase = left_child
elif right_child is not None:
__UpperCamelCase = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(__lowercase , __lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__lowercase , __lowercase ) )
if get_height(__lowercase ) - get_height(__lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__UpperCamelCase = left_rotation(__lowercase )
else:
__UpperCamelCase = rl_rotation(__lowercase )
elif get_height(__lowercase ) - get_height(__lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__UpperCamelCase = right_rotation(__lowercase )
else:
__UpperCamelCase = lr_rotation(__lowercase )
__UpperCamelCase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__lowercase )
return root
class snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__UpperCamelCase = None
def _lowerCamelCase ( self : List[Any] ):
return get_height(self.root )
def _lowerCamelCase ( self : Dict , __A : Any ):
print('insert:' + str(__A ) )
__UpperCamelCase = insert_node(self.root , __A )
def _lowerCamelCase ( self : Any , __A : Any ):
print('delete:' + str(__A ) )
if self.root is None:
print('Tree is empty!' )
return
__UpperCamelCase = del_node(self.root , __A )
def __str__( self : Any , ): # a level traversale, gives a more intuitive look on the tree
__UpperCamelCase = ''
__UpperCamelCase = MyQueue()
q.push(self.root )
__UpperCamelCase = self.get_height()
if layer == 0:
return output
__UpperCamelCase = 0
while not q.is_empty():
__UpperCamelCase = q.pop()
__UpperCamelCase = ' ' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__A )
q.push(__A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__UpperCamelCase = cnt + 1
for i in range(1_0_0 ):
if cnt == math.pow(2 , __A ) - 1:
__UpperCamelCase = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def lowercase__ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
a__ : Optional[int] =AVLtree()
a__ : List[str] =list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 399
| 0
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase__ ( _lowerCamelCase ):
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , 'num_attention_heads' ) )
class lowerCAmelCase__ :
def __init__( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=64 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : str=3 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=1 , __UpperCamelCase : Dict=16 , __UpperCamelCase : Tuple=[128, 256, 384] , __UpperCamelCase : List[Any]=[4, 6, 8] , __UpperCamelCase : List[str]=[2, 3, 4] , __UpperCamelCase : Optional[int]=[16, 16, 16] , __UpperCamelCase : str=0 , __UpperCamelCase : Dict=[2, 2, 2] , __UpperCamelCase : str=[2, 2, 2] , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Tuple=2 , ) -> str:
A = parent
A = batch_size
A = image_size
A = num_channels
A = kernel_size
A = stride
A = padding
A = hidden_sizes
A = num_attention_heads
A = depths
A = key_dim
A = drop_path_rate
A = patch_size
A = attention_ratio
A = mlp_ratio
A = initializer_range
A = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
A = is_training
A = use_labels
A = num_labels
A = initializer_range
def __UpperCamelCase ( self : List[str] ) -> Dict:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : str ) -> Tuple:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any ) -> int:
A = LevitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase )
A = (self.image_size, self.image_size)
A , A = image_size[0], image_size[1]
for _ in range(4 ):
A = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
A = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int ) -> Optional[Any]:
A = self.num_labels
A = LevitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : List[str] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A_ : List[Any] = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = False
A_ : Tuple = False
A_ : List[Any] = False
A_ : List[Any] = False
A_ : List[str] = False
def __UpperCamelCase ( self : Any ) -> List[Any]:
A = LevitModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __UpperCamelCase ( self : int ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Dict ) -> Tuple:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def __UpperCamelCase ( self : Dict ) -> int:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='Levit does not output attentions' )
def __UpperCamelCase ( self : int ) -> List[str]:
pass
def __UpperCamelCase ( self : str ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__UpperCamelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
def check_hidden_states_output(__UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Any ):
A = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
A = outputs.hidden_states
A = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
A = (self.model_tester.image_size, self.model_tester.image_size)
A , A = image_size[0], image_size[1]
for _ in range(4 ):
A = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
A = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
pass
def __UpperCamelCase ( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> str:
A = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCamelCase ( self : Any ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> Any:
if not self.model_tester.is_training:
return
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
A = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
A = model(**__UpperCamelCase ).loss
loss.backward()
def __UpperCamelCase ( self : Tuple ) -> str:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A = False
A = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
A = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
A = model(**__UpperCamelCase ).loss
loss.backward()
def __UpperCamelCase ( self : List[str] ) -> int:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
A = problem_type['title']
A = problem_type['num_labels']
A = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
A = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
A = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
A = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __UpperCamelCase ( self : Dict ) -> List[str]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = LevitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCamelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Dict ) -> List[Any]:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self : List[str] ) -> List[str]:
A = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
A = model(**__UpperCamelCase )
# verify the logits
A = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
A = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 224
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__snake_case :Any ={'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[int] =['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Any =['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
__snake_case :Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 224
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase ):
_lowerCamelCase = StableDiffusionXLImgaImgPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowerCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ):
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=_a , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__magic_name__ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
__magic_name__ = CLIPTextModel(_a )
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_a )
__magic_name__ = CLIPTextModelWithProjection(_a )
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_a )
__magic_name__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
__magic_name__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
__magic_name__ = image / 2 + 0.5
if str(_a ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(_a )
else:
__magic_name__ = torch.Generator(device=_a ).manual_seed(_a )
__magic_name__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.7_5,
}
return inputs
def lowerCAmelCase__ ( self ):
__magic_name__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = StableDiffusionXLImgaImgPipeline(**_a )
__magic_name__ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
__magic_name__ = self.get_dummy_inputs(_a )
__magic_name__ = sd_pipe(**_a ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCAmelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_dummy_components()
__magic_name__ = StableDiffusionXLImgaImgPipeline(**_a )
__magic_name__ = sd_pipe.to(_a )
__magic_name__ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
# forward without prompt embeds
__magic_name__ = self.get_dummy_inputs(_a )
__magic_name__ = 3 * ['this is a negative prompt']
__magic_name__ = negative_prompt
__magic_name__ = 3 * [inputs['prompt']]
__magic_name__ = sd_pipe(**_a )
__magic_name__ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__magic_name__ = self.get_dummy_inputs(_a )
__magic_name__ = 3 * ['this is a negative prompt']
__magic_name__ = 3 * [inputs.pop('''prompt''' )]
(
__magic_name__
) = sd_pipe.encode_prompt(_a , negative_prompt=_a )
__magic_name__ = sd_pipe(
**_a , prompt_embeds=_a , negative_prompt_embeds=_a , pooled_prompt_embeds=_a , negative_pooled_prompt_embeds=_a , )
__magic_name__ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def lowerCAmelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_="cpu" , UpperCamelCase_=torch.floataa , UpperCamelCase_=0 ):
__magic_name__ = torch.Generator(device=_a ).manual_seed(_a )
__magic_name__ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
__magic_name__ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
__magic_name__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase__ ( self ):
__magic_name__ = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
__magic_name__ = self.get_inputs(_a )
__magic_name__ = pipe(**_a ).images
__magic_name__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__magic_name__ = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 490
|
'''simple docstring'''
import operator as op
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = []
_a : List[str] = lambda __a , __a : int(x / y ) # noqa: E731 integer division operation
_a : List[Any] = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(1_2 ) , 'Stack' , sep=' | ' )
print('-' * (3_0 + len(__a )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__a ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' )
else:
_a : str = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' )
_a : str = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' )
stack.append(
str(opr[x](int(__a ) , int(__a ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(1_2 ) , ','.join(__a ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
__lowerCAmelCase = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 229
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class A_ ( snake_case_ ):
UpperCAmelCase__ = ['''image_processor''', '''feature_extractor''']
UpperCAmelCase__ = '''TvltImageProcessor'''
UpperCAmelCase__ = '''TvltFeatureExtractor'''
def __init__( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> List[str]:
super().__init__(image_processor=__lowerCamelCase , feature_extractor=__lowerCamelCase )
__magic_name__ = image_processor
__magic_name__ = feature_extractor
def __call__( self : str , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , *__lowerCamelCase : int , **__lowerCamelCase : str , ) -> Tuple:
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
__magic_name__ = None
if images is not None:
__magic_name__ = self.image_processor(__lowerCamelCase , mask_pixel=__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
if images_mixed is not None:
__magic_name__ = self.image_processor(__lowerCamelCase , is_mixed=__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
if audio is not None:
__magic_name__ = self.feature_extractor(
__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , mask_audio=__lowerCamelCase , **__lowerCamelCase )
__magic_name__ = {}
if audio is not None:
output_dict.update(__lowerCamelCase )
if images is not None:
output_dict.update(__lowerCamelCase )
if images_mixed_dict is not None:
output_dict.update(__lowerCamelCase )
return output_dict
@property
def _snake_case ( self : Any ) -> Dict:
__magic_name__ = self.image_processor.model_input_names
__magic_name__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 468
|
"""simple docstring"""
def _lowerCAmelCase ( __lowerCamelCase:list ):
'''simple docstring'''
__magic_name__ = len(__lowerCamelCase )
for i in range(1 , __lowerCamelCase ):
__magic_name__ = collection[i]
__magic_name__ = 0
__magic_name__ = i - 1
while low <= high:
__magic_name__ = (low + high) // 2
if val < collection[mid]:
__magic_name__ = mid - 1
else:
__magic_name__ = mid + 1
for j in range(__lowerCamelCase , __lowerCamelCase , -1 ):
__magic_name__ = collection[j - 1]
__magic_name__ = val
return collection
if __name__ == "__main__":
lowercase = input('''Enter numbers separated by a comma:\n''').strip()
lowercase = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 468
| 1
|
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
import re
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if len(re.findall("""[ATCG]""" , _lowerCAmelCase ) ) != len(_lowerCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A_ = 192
A_ = 768
A_ = 12
A_ = 3
A_ = [800, 1333]
A_ = False
elif yolos_name == "yolos_s_dWr":
A_ = 330
A_ = 14
A_ = 6
A_ = 1320
elif "yolos_s" in yolos_name:
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
elif "yolos_b" in yolos_name:
A_ = [800, 1344]
A_ = 91
A_ = '''huggingface/label-files'''
A_ = '''coco-detection-id2label.json'''
A_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
A_ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
A_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[: config.hidden_size, :]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[-config.hidden_size :, :]
A_ = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if "backbone" in name:
A_ = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
A_ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
A_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
A_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
A_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
A_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
A_ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
A_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
A_ = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
A_ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
A_ = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
A_ = key.split('''.''' )
A_ = int(key_split[2] )
A_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A_ = val[:dim, :]
A_ = val[
dim : dim * 2, :
]
A_ = val[-dim:, :]
else:
A_ = val[:dim]
A_ = val[dim : dim * 2]
A_ = val[-dim:]
else:
A_ = val
return orig_state_dict
def _lowerCamelCase ( ):
'''simple docstring'''
A_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A_ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
A_ = get_yolos_config(SCREAMING_SNAKE_CASE )
# load original state_dict
A_ = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
# load 🤗 model
A_ = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.eval()
A_ = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by YolosImageProcessor
A_ = 800 if yolos_name != '''yolos_ti''' else 512
A_ = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE )
A_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
A_ = model(**SCREAMING_SNAKE_CASE )
A_ ,A_ = outputs.logits, outputs.pred_boxes
A_ ,A_ = None, None
if yolos_name == "yolos_ti":
A_ = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
A_ = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
A_ = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
A_ = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
A_ = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
A_ = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
A_ = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
A_ = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
A_ = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
A_ = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
A_ = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
A_ = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE , organization='''hustvl''' )
model.push_to_hub(SCREAMING_SNAKE_CASE , organization='''hustvl''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 563
|
from __future__ import annotations
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 563
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : int ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : str , **a__ : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __snake_case ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Any ):
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : Tuple ):
UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=a__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(a__ , return_tensors='''np''' )
UpperCAmelCase = processor(images=a__ , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def __snake_case ( self : Any ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=a__ )
UpperCAmelCase = [torch.ones((1, 3, 5, 5) )]
UpperCAmelCase = [[1764, 2646]]
UpperCAmelCase = [[683, 1024]]
UpperCAmelCase = processor.post_process_masks(a__ , a__ , a__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = processor.post_process_masks(
a__ , torch.tensor(a__ ) , torch.tensor(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(a__ ):
UpperCAmelCase = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
@require_vision
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : List[str] ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Optional[int] , **a__ : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __snake_case ( self : int ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : int ):
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : str ):
UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=a__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(a__ , return_tensors='''np''' )
UpperCAmelCase = processor(images=a__ , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=a__ )
UpperCAmelCase = [tf.ones((1, 3, 5, 5) )]
UpperCAmelCase = [[1764, 2646]]
UpperCAmelCase = [[683, 1024]]
UpperCAmelCase = processor.post_process_masks(a__ , a__ , a__ , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = processor.post_process_masks(
a__ , tf.convert_to_tensor(a__ ) , tf.convert_to_tensor(a__ ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
UpperCAmelCase = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : List[str] ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = SamImageProcessor()
UpperCAmelCase = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Union[str, Any] , **a__ : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __snake_case ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=a__ )
UpperCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
UpperCAmelCase = [tf.convert_to_tensor(a__ )]
UpperCAmelCase = [torch.tensor(a__ )]
UpperCAmelCase = [[1764, 2646]]
UpperCAmelCase = [[683, 1024]]
UpperCAmelCase = processor.post_process_masks(
a__ , a__ , a__ , return_tensors='''tf''' )
UpperCAmelCase = processor.post_process_masks(
a__ , a__ , a__ , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = SamProcessor(image_processor=a__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(a__ , return_tensors='''pt''' )['''pixel_values'''].numpy()
UpperCAmelCase = processor(images=a__ , return_tensors='''pt''' )['''pixel_values'''].numpy()
UpperCAmelCase = image_processor(a__ , return_tensors='''tf''' )['''pixel_values'''].numpy()
UpperCAmelCase = processor(images=a__ , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
| 51
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51
| 1
|
from __future__ import annotations
def UpperCAmelCase ( UpperCamelCase__ ) -> int:
'''simple docstring'''
if not nums:
return 0
__lowerCAmelCase = nums[0]
__lowerCAmelCase = 0
for num in nums[1:]:
__lowerCAmelCase = (
max_excluding + num,
max(_lowercase , _lowercase ),
)
return max(_lowercase , _lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A : Optional[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
__A : int = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
__A : Optional[Any] = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, "rb") as fp:
__A : Union[str, Any] = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__A : Optional[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A : int = [0] * args.vocab_size
for k, v in counter.items():
__A : Optional[int] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 334
| 0
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _snake_case ( __UpperCAmelCase ):
lowerCAmelCase :List[Any] = ['image_processor']
lowerCAmelCase :Any = 'SamImageProcessor'
def __init__( self , _lowerCamelCase):
super().__init__(snake_case__)
UpperCAmelCase__ : Optional[Any] = self.image_processor
UpperCAmelCase__ : Any = -10
UpperCAmelCase__ : Tuple = self.image_processor.size["""longest_edge"""]
def __call__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCAmelCase__ : Dict = self.image_processor(
snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# pop arguments that are not used in the foward but used nevertheless
UpperCAmelCase__ : Union[str, Any] = encoding_image_processor["""original_sizes"""]
if hasattr(snake_case__ , """numpy"""): # Checks if Torch or TF tensor
UpperCAmelCase__ : List[Any] = original_sizes.numpy()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = self._check_and_preprocess_points(
input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , )
UpperCAmelCase__ : str = self._normalize_and_convert(
snake_case__ , snake_case__ , input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , return_tensors=snake_case__ , )
return encoding_image_processor
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="pt" , ):
if input_points is not None:
if len(snake_case__) != len(snake_case__):
UpperCAmelCase__ : Any = [
self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0]) for point in input_points
]
else:
UpperCAmelCase__ : int = [
self._normalize_coordinates(self.target_size , snake_case__ , snake_case__)
for point, original_size in zip(snake_case__ , snake_case__)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._pad_points_and_labels(snake_case__ , snake_case__)
UpperCAmelCase__ : Dict = np.array(snake_case__)
if input_labels is not None:
UpperCAmelCase__ : Optional[int] = np.array(snake_case__)
if input_boxes is not None:
if len(snake_case__) != len(snake_case__):
UpperCAmelCase__ : str = [
self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] , is_bounding_box=snake_case__)
for box in input_boxes
]
else:
UpperCAmelCase__ : Any = [
self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ , is_bounding_box=snake_case__)
for box, original_size in zip(snake_case__ , snake_case__)
]
UpperCAmelCase__ : Tuple = np.array(snake_case__)
if input_boxes is not None:
if return_tensors == "pt":
UpperCAmelCase__ : Dict = torch.from_numpy(snake_case__)
# boxes batch size of 1 by default
UpperCAmelCase__ : Union[str, Any] = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
UpperCAmelCase__ : Optional[int] = tf.convert_to_tensor(snake_case__)
# boxes batch size of 1 by default
UpperCAmelCase__ : List[Any] = tf.expand_dims(snake_case__ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes})
if input_points is not None:
if return_tensors == "pt":
UpperCAmelCase__ : Any = torch.from_numpy(snake_case__)
# point batch size of 1 by default
UpperCAmelCase__ : Optional[int] = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
UpperCAmelCase__ : Optional[int] = tf.convert_to_tensor(snake_case__)
# point batch size of 1 by default
UpperCAmelCase__ : List[str] = tf.expand_dims(snake_case__ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points})
if input_labels is not None:
if return_tensors == "pt":
UpperCAmelCase__ : Optional[Any] = torch.from_numpy(snake_case__)
# point batch size of 1 by default
UpperCAmelCase__ : int = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
UpperCAmelCase__ : Dict = tf.convert_to_tensor(snake_case__)
# point batch size of 1 by default
UpperCAmelCase__ : Optional[int] = tf.expand_dims(snake_case__ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels})
return encoding_image_processor
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[str] = max([point.shape[0] for point in input_points])
UpperCAmelCase__ : List[Any] = []
for i, point in enumerate(snake_case__):
if point.shape[0] != expected_nb_points:
UpperCAmelCase__ : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
UpperCAmelCase__ : str = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(snake_case__)
UpperCAmelCase__ : int = processed_input_points
return input_points, input_labels
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False):
UpperCAmelCase__ , UpperCAmelCase__ : str = original_size
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor._get_preprocess_shape(snake_case__ , longest_edge=snake_case__)
UpperCAmelCase__ : List[str] = deepcopy(snake_case__).astype(snake_case__)
if is_bounding_box:
UpperCAmelCase__ : int = coords.reshape(-1 , 2 , 2)
UpperCAmelCase__ : Dict = coords[..., 0] * (new_w / old_w)
UpperCAmelCase__ : Optional[int] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCAmelCase__ : Optional[Any] = coords.reshape(-1 , 4)
return coords
def snake_case__ ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
if input_points is not None:
if hasattr(snake_case__ , """numpy"""): # Checks for TF or Torch tensor
UpperCAmelCase__ : Optional[int] = input_points.numpy().tolist()
if not isinstance(snake_case__ , snake_case__) or not isinstance(input_points[0] , snake_case__):
raise ValueError("""Input points must be a list of list of floating points.""")
UpperCAmelCase__ : List[str] = [np.array(snake_case__) for input_point in input_points]
else:
UpperCAmelCase__ : Optional[Any] = None
if input_labels is not None:
if hasattr(snake_case__ , """numpy"""):
UpperCAmelCase__ : Optional[Any] = input_labels.numpy().tolist()
if not isinstance(snake_case__ , snake_case__) or not isinstance(input_labels[0] , snake_case__):
raise ValueError("""Input labels must be a list of list integers.""")
UpperCAmelCase__ : Optional[int] = [np.array(snake_case__) for label in input_labels]
else:
UpperCAmelCase__ : Union[str, Any] = None
if input_boxes is not None:
if hasattr(snake_case__ , """numpy"""):
UpperCAmelCase__ : List[Any] = input_boxes.numpy().tolist()
if (
not isinstance(snake_case__ , snake_case__)
or not isinstance(input_boxes[0] , snake_case__)
or not isinstance(input_boxes[0][0] , snake_case__)
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""")
UpperCAmelCase__ : int = [np.array(snake_case__).astype(np.floataa) for box in input_boxes]
else:
UpperCAmelCase__ : int = None
return input_points, input_labels, input_boxes
@property
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(snake_case__))
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
return self.image_processor.post_process_masks(*snake_case__ , **snake_case__)
| 407
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 611
| 0
|
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
__a = len(__SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
__a = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__a = arr[mi::-1] + arr[mi + 1 : len(__SCREAMING_SNAKE_CASE )]
# Reverse whole list
__a = arr[cur - 1 :: -1] + arr[cur : len(__SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 201
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201
| 1
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def a ( __UpperCAmelCase : str , __UpperCAmelCase : str = "cpu" , __UpperCAmelCase : Union[str, None] = None ) -> None:
__magic_name__: List[Any] = torch.load(__UpperCAmelCase , map_location=__UpperCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__UpperCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
__magic_name__: Any = v.half()
if save_path is None: # overwrite src_path
__magic_name__: List[str] = src_path
torch.save(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 96
|
"""simple docstring"""
def a ( __UpperCAmelCase : List[Any] ) -> str:
__magic_name__: Optional[int] = [0] * len(__UpperCAmelCase )
__magic_name__: str = []
__magic_name__: Any = []
__magic_name__: Union[str, Any] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCAmelCase )
while queue:
__magic_name__: Optional[Any] = queue.pop(0 )
cnt += 1
topo.append(__UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__UpperCAmelCase )
if cnt != len(__UpperCAmelCase ):
print("""Cycle exists""" )
else:
print(__UpperCAmelCase )
# Adjacency List of Graph
__lowerCamelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 96
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase( UpperCAmelCase__ ):
__snake_case : int = VOCAB_FILES_NAMES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
__snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : str = LxmertTokenizer
def __init__( self : int , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]="[UNK]" , SCREAMING_SNAKE_CASE : str="[SEP]" , SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , SCREAMING_SNAKE_CASE : Tuple="[CLS]" , SCREAMING_SNAKE_CASE : List[Any]="[MASK]" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Tuple=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ :Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ :Optional[int] = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ :Dict = do_lower_case
SCREAMING_SNAKE_CASE_ :Union[str, Any] = strip_accents
SCREAMING_SNAKE_CASE_ :str = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ :Optional[int] = normalizer_class(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = do_lower_case
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
return tuple(SCREAMING_SNAKE_CASE )
| 714
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 233
| 0
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowercase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : int = StableDiffusionControlNetImgaImgPipeline
a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
a__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase_= UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCAmelCase_= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCAmelCase_= DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCAmelCase_= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCAmelCase_= CLIPTextModel(_a )
UpperCAmelCase_= CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase_= {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str]=0 ) -> int:
if str(_a ).startswith("""mps""" ):
UpperCAmelCase_= torch.manual_seed(_a )
else:
UpperCAmelCase_= torch.Generator(device=_a ).manual_seed(_a )
UpperCAmelCase_= 2
UpperCAmelCase_= randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , )
UpperCAmelCase_= floats_tensor(control_image.shape , rng=random.Random(_a ) ).to(_a )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_= Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
UpperCAmelCase_= {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class lowercase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : Optional[Any] = StableDiffusionControlNetImgaImgPipeline
a__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__ : Tuple = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_= UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__UpperCAmelCase : Dict ):
if isinstance(_a , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
UpperCAmelCase_= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCAmelCase_= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCAmelCase_= DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCAmelCase_= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCAmelCase_= CLIPTextModel(_a )
UpperCAmelCase_= CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase_= MultiControlNetModel([controlneta, controlneta] )
UpperCAmelCase_= {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any=0 ) -> Dict:
if str(_a ).startswith("""mps""" ):
UpperCAmelCase_= torch.manual_seed(_a )
else:
UpperCAmelCase_= torch.Generator(device=_a ).manual_seed(_a )
UpperCAmelCase_= 2
UpperCAmelCase_= [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
]
UpperCAmelCase_= floats_tensor(control_image[0].shape , rng=random.Random(_a ) ).to(_a )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_= Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
UpperCAmelCase_= {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= self.pipeline_class(**_a )
pipe.to(_a )
UpperCAmelCase_= 10.0
UpperCAmelCase_= 4
UpperCAmelCase_= self.get_dummy_inputs(_a )
UpperCAmelCase_= steps
UpperCAmelCase_= scale
UpperCAmelCase_= pipe(**_a )[0]
UpperCAmelCase_= self.get_dummy_inputs(_a )
UpperCAmelCase_= steps
UpperCAmelCase_= scale
UpperCAmelCase_= pipe(**_a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCAmelCase_= self.get_dummy_inputs(_a )
UpperCAmelCase_= steps
UpperCAmelCase_= scale
UpperCAmelCase_= pipe(**_a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCAmelCase_= self.get_dummy_inputs(_a )
UpperCAmelCase_= steps
UpperCAmelCase_= scale
UpperCAmelCase_= pipe(**_a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_= ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
UpperCAmelCase_= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=_a , controlnet=_a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
UpperCAmelCase_= torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase_= """evil space-punk bird"""
UpperCAmelCase_= load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
UpperCAmelCase_= load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
UpperCAmelCase_= pipe(
_a , _a , control_image=_a , generator=_a , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
UpperCAmelCase_= output.images[0]
assert image.shape == (512, 512, 3)
UpperCAmelCase_= load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 593
|
from typing import Any
import numpy as np
def lowerCamelCase_ ( UpperCamelCase__ : np.ndarray ):
'''simple docstring'''
return np.array_equal(UpperCamelCase__, matrix.conjugate().T )
def lowerCamelCase_ ( UpperCamelCase__ : np.ndarray, UpperCamelCase__ : np.ndarray ):
'''simple docstring'''
UpperCamelCase__ = v.conjugate().T
UpperCamelCase__ = v_star.dot(UpperCamelCase__ )
assert isinstance(UpperCamelCase__, np.ndarray )
return (v_star_dot.dot(UpperCamelCase__ )) / (v_star.dot(UpperCamelCase__ ))
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
UpperCamelCase__ = np.array([[1], [2], [3]] )
assert is_hermitian(UpperCamelCase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(UpperCamelCase__, UpperCamelCase__ ) )
UpperCamelCase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(UpperCamelCase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(UpperCamelCase__, UpperCamelCase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 240
| 0
|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
a : Optional[int] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = 5_0257 , UpperCamelCase = 1024 , UpperCamelCase = 768 , UpperCamelCase = 12 , UpperCamelCase = 12 , UpperCamelCase = None , UpperCamelCase = "gelu_new" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 1E-5 , UpperCamelCase = 0.02 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = False , UpperCamelCase = False , ) -> Tuple:
super().__init__()
__lowerCAmelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
__lowerCAmelCase = prefix_inner_dim
__lowerCAmelCase = prefix_hidden_dim
__lowerCAmelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__lowerCAmelCase = (
nn.Linear(self.prefix_hidden_dim , UpperCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__lowerCAmelCase = GPTaConfig(
vocab_size=UpperCamelCase , n_positions=UpperCamelCase , n_embd=UpperCamelCase , n_layer=UpperCamelCase , n_head=UpperCamelCase , n_inner=UpperCamelCase , activation_function=UpperCamelCase , resid_pdrop=UpperCamelCase , embd_pdrop=UpperCamelCase , attn_pdrop=UpperCamelCase , layer_norm_epsilon=UpperCamelCase , initializer_range=UpperCamelCase , scale_attn_weights=UpperCamelCase , use_cache=UpperCamelCase , scale_attn_by_inverse_layer_idx=UpperCamelCase , reorder_and_upcast_attn=UpperCamelCase , )
__lowerCAmelCase = GPTaLMHeadModel(UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , ) -> str:
__lowerCAmelCase = self.transformer.transformer.wte(UpperCamelCase )
__lowerCAmelCase = self.encode_prefix(UpperCamelCase )
__lowerCAmelCase = self.decode_prefix(UpperCamelCase )
__lowerCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__lowerCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__lowerCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 )
__lowerCAmelCase = self.transformer(inputs_embeds=UpperCamelCase , labels=UpperCamelCase , attention_mask=UpperCamelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase ) -> torch.Tensor:
return torch.zeros(UpperCamelCase , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase )
def UpperCAmelCase_ ( self , UpperCamelCase ) -> List[Any]:
return self.encode_prefix(UpperCamelCase )
@torch.no_grad()
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
__lowerCAmelCase = torch.split(UpperCamelCase , 1 , dim=0 )
__lowerCAmelCase = []
__lowerCAmelCase = []
for feature in features:
__lowerCAmelCase = self.decode_prefix(feature.to(UpperCamelCase ) ) # back to the clip feature
# Only support beam search for now
__lowerCAmelCase , __lowerCAmelCase = self.generate_beam(
input_embeds=UpperCamelCase , device=UpperCamelCase , eos_token_id=UpperCamelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__lowerCAmelCase = torch.stack(UpperCamelCase )
__lowerCAmelCase = torch.stack(UpperCamelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCAmelCase_ ( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase = 5 , UpperCamelCase = 67 , UpperCamelCase = 1.0 , UpperCamelCase = None , ) -> str:
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = torch.ones(UpperCamelCase , device=UpperCamelCase , dtype=torch.int )
__lowerCAmelCase = torch.zeros(UpperCamelCase , device=UpperCamelCase , dtype=torch.bool )
if input_embeds is not None:
__lowerCAmelCase = input_embeds
else:
__lowerCAmelCase = self.transformer.transformer.wte(UpperCamelCase )
for i in range(UpperCamelCase ):
__lowerCAmelCase = self.transformer(inputs_embeds=UpperCamelCase )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__lowerCAmelCase = logits.softmax(-1 ).log()
if scores is None:
__lowerCAmelCase , __lowerCAmelCase = logits.topk(UpperCamelCase , -1 )
__lowerCAmelCase = generated.expand(UpperCamelCase , *generated.shape[1:] )
__lowerCAmelCase , __lowerCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__lowerCAmelCase = next_tokens
else:
__lowerCAmelCase = tokens.expand(UpperCamelCase , *tokens.shape[1:] )
__lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
__lowerCAmelCase = -float(np.inf )
__lowerCAmelCase = 0
__lowerCAmelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__lowerCAmelCase = scores_sum / seq_lengths[:, None]
__lowerCAmelCase , __lowerCAmelCase = scores_sum_average.view(-1 ).topk(UpperCamelCase , -1 )
__lowerCAmelCase = next_tokens // scores_sum.shape[1]
__lowerCAmelCase = seq_lengths[next_tokens_source]
__lowerCAmelCase = next_tokens % scores_sum.shape[1]
__lowerCAmelCase = next_tokens.unsqueeze(1 )
__lowerCAmelCase = tokens[next_tokens_source]
__lowerCAmelCase = torch.cat((tokens, next_tokens) , dim=1 )
__lowerCAmelCase = generated[next_tokens_source]
__lowerCAmelCase = scores_sum_average * seq_lengths
__lowerCAmelCase = is_stopped[next_tokens_source]
__lowerCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__lowerCAmelCase = torch.cat((generated, next_token_embed) , dim=1 )
__lowerCAmelCase = is_stopped + next_tokens.eq(UpperCamelCase ).squeeze()
if is_stopped.all():
break
__lowerCAmelCase = scores / seq_lengths
__lowerCAmelCase = scores.argsort(descending=UpperCamelCase )
# tokens tensors are already padded to max_seq_length
__lowerCAmelCase = [tokens[i] for i in order]
__lowerCAmelCase = torch.stack(UpperCamelCase , dim=0 )
__lowerCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 39
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase : str = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase : Optional[Any] = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
lowerCAmelCase : Tuple = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class UpperCAmelCase__ ( UpperCamelCase__ ):
a : Dict = VOCAB_FILES_NAMES
a : Any = PRETRAINED_VOCAB_FILES_MAP
a : Dict = PRETRAINED_INIT_CONFIGURATION
a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] = SqueezeBertTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> List[Any]:
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(UpperCamelCase , normalizer_state.pop("type" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**UpperCamelCase )
__lowerCAmelCase = do_lower_case
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=None ) -> str:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 39
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=13 , lowerCAmelCase__ : Dict=7 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=99 , lowerCAmelCase__ : Dict=[1, 1, 2] , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Union[str, Any]=32 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : int=8 , lowerCAmelCase__ : Dict=37 , lowerCAmelCase__ : Tuple="gelu_new" , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : int=5_12 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Tuple=False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : List[Any] = seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Any = block_sizes
SCREAMING_SNAKE_CASE : List[Any] = num_decoder_layers
SCREAMING_SNAKE_CASE : int = d_model
SCREAMING_SNAKE_CASE : Optional[Any] = n_head
SCREAMING_SNAKE_CASE : Optional[int] = d_head
SCREAMING_SNAKE_CASE : Dict = d_inner
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : Dict = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE : Optional[int] = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE : Optional[int] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE : List[str] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE : Dict = self.num_hidden_layers + 2
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Any = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Optional[Any] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFFunnelModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : int = TFFunnelModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = TFFunnelBaseModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : int = TFFunnelBaseModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelBaseModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __lowercase ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = TFFunnelForPreTraining(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFFunnelForMaskedLM(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForSequenceClassification(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.num_choices
SCREAMING_SNAKE_CASE : Dict = TFFunnelForMultipleChoice(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : int = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = TFFunnelForTokenClassification(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = TFFunnelForQuestionAnswering(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) ,(
SCREAMING_SNAKE_CASE
) ,(
SCREAMING_SNAKE_CASE
) ,(
SCREAMING_SNAKE_CASE
) ,(
SCREAMING_SNAKE_CASE
) ,(
SCREAMING_SNAKE_CASE
) ,(
SCREAMING_SNAKE_CASE
) ,
) : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ):
_lowerCAmelCase : str = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : int = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase : int = False
_lowerCAmelCase : Optional[int] = False
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@require_tf
class lowerCamelCase_ ( snake_case_ , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_lowerCAmelCase : Any = False
_lowerCAmelCase : int = False
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = TFFunnelModelTester(self , base=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ )
def __lowercase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase__ )
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def __lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
| 527
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase_ : Tuple = 'docs/source/en/_toctree.yml'
def UpperCAmelCase ( A : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = defaultdict(A )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE : Tuple = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE : Tuple = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE : Optional[int] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(A ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(A , key=lambda A : s["title"].lower() )
def UpperCAmelCase ( A : Any=False ):
with open(A , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : str = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE : Dict = content[api_idx]['''sections''']
# Then to the model doc
SCREAMING_SNAKE_CASE : List[str] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE : Dict = api_doc[model_idx]['''sections''']
SCREAMING_SNAKE_CASE : List[Any] = [(idx, section) for idx, section in enumerate(A ) if '''sections''' in section]
SCREAMING_SNAKE_CASE : int = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE : List[str] = modality_doc['''sections''']
SCREAMING_SNAKE_CASE : List[str] = clean_model_doc_toc(A )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE : Dict = True
if overwrite:
SCREAMING_SNAKE_CASE : int = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE : List[Any] = model_doc
SCREAMING_SNAKE_CASE : Union[str, Any] = api_doc
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(A , allow_unicode=A ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase_ : Any = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 527
| 1
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def snake_case_ ( *a__ , **a__):
pass
def lowerCAmelCase__ ( UpperCamelCase_ : Image )-> str:
A__ = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
UpperCamelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def snake_case_ ( self , a__ , a__ , a__):
A__ = DepthEstimationPipeline(model=a__ , image_processor=a__)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def snake_case_ ( self , a__ , a__):
A__ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , a__)
import datasets
A__ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''')
A__ = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
])
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
] , a__ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''')
def snake_case_ ( self):
pass
@slow
@require_torch
def snake_case_ ( self):
A__ = '''Intel/dpt-large'''
A__ = pipeline('''depth-estimation''' , model=a__)
A__ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
A__ = hashimage(outputs['''depth'''])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 2_9.3_0_4)
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.6_6_2)
@require_torch
def snake_case_ ( self):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 702
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _UpperCAmelCase :
def __init__( self , a__ , ):
A__ = parent
A__ = 1_3
A__ = 7
A__ = True
A__ = True
A__ = True
A__ = True
A__ = True
A__ = False
A__ = False
A__ = False
A__ = 2
A__ = 9_9
A__ = 0
A__ = 3_2
A__ = 2
A__ = 4
A__ = 0.1
A__ = 0.1
A__ = 5_1_2
A__ = 1_6
A__ = 2
A__ = 0.0_2
A__ = 3
A__ = 4
A__ = '''last'''
A__ = True
A__ = None
A__ = 0
def snake_case_ ( self):
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa)
A__ = None
if self.use_input_lengths:
A__ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa)
A__ = ids_tensor([self.batch_size] , self.num_choices)
A__ = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = TFFlaubertModel(config=a__)
A__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
A__ = model(a__)
A__ = [input_ids, input_mask]
A__ = model(a__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = TFFlaubertWithLMHeadModel(a__)
A__ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = TFFlaubertForQuestionAnsweringSimple(a__)
A__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
A__ = model(a__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = TFFlaubertForSequenceClassification(a__)
A__ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = self.num_labels
A__ = TFFlaubertForTokenClassification(config=a__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
A__ = self.num_choices
A__ = TFFlaubertForMultipleChoice(config=a__)
A__ = tf.tile(tf.expand_dims(a__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(a__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(a__ , 1) , (1, self.num_choices, 1))
A__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A__ = model(a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
UpperCamelCase__ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase__ = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case_ ( self):
A__ = TFFlaubertModelTester(self)
A__ = ConfigTester(self , config_class=a__ , emb_dim=3_7)
def snake_case_ ( self):
self.config_tester.run_common_tests()
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*a__)
def snake_case_ ( self):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*a__)
@slow
def snake_case_ ( self):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFFlaubertModel.from_pretrained(a__)
self.assertIsNotNone(a__)
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def snake_case_ ( self):
A__ = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''')
A__ = tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A__ = model(a__)[0]
A__ = tf.TensorShape((1, 8, 5_1_2))
self.assertEqual(output.shape , a__)
# compare the actual values for a slice.
A__ = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 526
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : Tuple = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 555
|
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE = 10**9 ) -> int:
"""simple docstring"""
__a = 1
__a = 2
__a = 0
__a = 0
__a = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__a = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 582
| 0
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__lowercase = datasets.load_iris()
__lowercase = np.array(data["""data"""])
__lowercase = np.array(data["""target"""])
__lowercase = data["target_names"]
__lowercase = train_test_split(X, y)
def lowercase ( A_ , A_ )-> Tuple:
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def lowercase ( A_ , A_ , A_ , A_ , A_=5 )-> int:
'''simple docstring'''
a : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
a : int = []
for data_point in data:
a : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
a : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
a : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 710
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _A ( _a ):
"""simple docstring"""
def __init__( self : Dict , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : KarrasDiffusionSchedulers , __UpperCAmelCase : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=__UpperCAmelCase , vae=__UpperCAmelCase , scheduler=__UpperCAmelCase)
# create a imagenet -> id dictionary for easier use
a : List[Any] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(","):
a : Any = int(__UpperCAmelCase)
a : str = dict(sorted(self.labels.items()))
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Union[str, List[str]]):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : str = list(__UpperCAmelCase)
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''')
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : int = 50 , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ):
a : str = len(__UpperCAmelCase)
a : List[str] = self.transformer.config.sample_size
a : List[str] = self.transformer.config.in_channels
a : Optional[Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__UpperCAmelCase , device=self.device , dtype=self.transformer.dtype , )
a : Any = torch.cat([latents] * 2) if guidance_scale > 1 else latents
a : str = torch.tensor(__UpperCAmelCase , device=self.device).reshape(-1)
a : str = torch.tensor([1000] * batch_size , device=self.device)
a : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
a : Any = latent_model_input[: len(__UpperCAmelCase) // 2]
a : Optional[Any] = torch.cat([half, half] , dim=0)
a : List[str] = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = t
if not torch.is_tensor(__UpperCAmelCase):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
a : List[str] = latent_model_input.device.type == "mps"
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : List[str] = torch.floataa if is_mps else torch.floataa
else:
a : List[Any] = torch.intaa if is_mps else torch.intaa
a : Any = torch.tensor([timesteps] , dtype=__UpperCAmelCase , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
a : List[Any] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
a : Optional[Any] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
a : str = self.transformer(
__UpperCAmelCase , timestep=__UpperCAmelCase , class_labels=__UpperCAmelCase).sample
# perform guidance
if guidance_scale > 1:
a , a : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
a , a : Dict = torch.split(__UpperCAmelCase , len(__UpperCAmelCase) // 2 , dim=0)
a : Optional[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
a : List[Any] = torch.cat([half_eps, half_eps] , dim=0)
a : List[str] = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
a , a : Optional[int] = torch.split(__UpperCAmelCase , __UpperCAmelCase , dim=1)
else:
a : int = noise_pred
# compute previous image: x_t -> x_t-1
a : List[str] = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
if guidance_scale > 1:
a , a : Union[str, Any] = latent_model_input.chunk(2 , dim=0)
else:
a : Any = latent_model_input
a : str = 1 / self.vae.config.scaling_factor * latents
a : Dict = self.vae.decode(__UpperCAmelCase).sample
a : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
a : Tuple = self.numpy_to_pil(__UpperCAmelCase)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCAmelCase)
| 135
| 0
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCAmelCase : Dict = '''__DUMMY_TRANSFORMERS_USER__'''
_lowerCAmelCase : Optional[int] = '''Dummy User'''
_lowerCAmelCase : Dict = '''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'''
_lowerCAmelCase : Optional[int] = '''https://hub-ci.huggingface.co'''
_lowerCAmelCase : str = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
_lowerCAmelCase : Union[str, Any] = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
_lowerCAmelCase : Tuple = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , lowerCAmelCase_ )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , lowerCAmelCase_ )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , lowerCAmelCase_ )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , lowerCAmelCase_ )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
HfFolder.save_token(lowerCAmelCase_ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCamelCase_( ) -> Any:
'''simple docstring'''
return HfApi(endpoint=lowerCAmelCase_ )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[str] = HfFolder.get_token()
HfFolder.save_token(lowerCAmelCase_ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCAmelCase_ )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(lowerCAmelCase_ )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : str = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" , private=lowerCAmelCase_ )
hf_api.upload_file(
token=lowerCAmelCase_ , path_or_fileobj=str(lowerCAmelCase_ ) , path_in_repo="data/text_data.txt" , repo_id=lowerCAmelCase_ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Optional[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" , private=lowerCAmelCase_ )
hf_api.upload_file(
token=lowerCAmelCase_ , path_or_fileobj=str(lowerCAmelCase_ ) , path_in_repo="data.zip" , repo_id=lowerCAmelCase_ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Dict = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : str = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" , private=lowerCAmelCase_ )
hf_api.upload_file(
token=lowerCAmelCase_ , path_or_fileobj=str(lowerCAmelCase_ ) , path_in_repo="data.zip" , repo_id=lowerCAmelCase_ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase_ , token=lowerCAmelCase_ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 46
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : str = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Any = "xlm-roberta"
def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"}
else:
__SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 682
| 0
|
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def _a ( self , **_a ) -> List[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def _a ( self , **_a ) -> List[Any]:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def _a ( self , **_a ) -> Any:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
SCREAMING_SNAKE_CASE__ : int = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Dict = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(images=_a , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(text=_a )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """lower newer"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : List[Any] = processor.batch_decode(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Dict = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer"""
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 12
| 0
|
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def a_ ( lowerCAmelCase_ : str ):
__lowerCAmelCase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowerCAmelCase = remove_duplicates(key.upper() )
__lowerCAmelCase = len(lowerCAmelCase_ )
# First fill cipher with key characters
__lowerCAmelCase = {alphabet[i]: char for i, char in enumerate(lowerCAmelCase_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCAmelCase_ ), 26 ):
__lowerCAmelCase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowerCAmelCase = alphabet[i - offset]
__lowerCAmelCase = char
return cipher_alphabet
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : dict[str, str] ):
return "".join(cipher_map.get(lowerCAmelCase_, lowerCAmelCase_ ) for ch in message.upper() )
def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : dict[str, str] ):
__lowerCAmelCase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCAmelCase_, lowerCAmelCase_ ) for ch in message.upper() )
def a_ ( ):
__lowerCAmelCase = input('Enter message to encode or decode: ' ).strip()
__lowerCAmelCase = input('Enter keyword: ' ).strip()
__lowerCAmelCase = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
__lowerCAmelCase = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
__lowerCAmelCase = create_cipher_map(lowerCAmelCase_ )
print(func(lowerCAmelCase_, lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : int = "openai-gpt"
a__ : Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Union[str, Any] , __lowerCAmelCase : int=4_04_78 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : str=7_68 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=1E-5 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[Any]="cls_index" , __lowerCAmelCase : str=True , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=0.1 , **__lowerCAmelCase : Tuple , ) -> Optional[Any]:
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = afn
_A = resid_pdrop
_A = embd_pdrop
_A = attn_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = summary_type
_A = summary_use_proj
_A = summary_activation
_A = summary_first_dropout
_A = summary_proj_to_labels
super().__init__(**__lowerCAmelCase )
| 2
| 0
|
'''simple docstring'''
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = multiprocessing.Manager()
_a = manager.list()
_a = multiprocessing.Process(target=UpperCamelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : List[str] ):
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_a = shutil.rmtree
_a = os.rmdir
_a = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_a = {}
with swallow_io():
with time_limit(UpperCamelCase ):
exec(UpperCamelCase , UpperCamelCase )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f'failed: {e}' )
# Needed for cleaning up.
_a = rmtree
_a = rmdir
_a = chdir
@contextlib.contextmanager
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
def signal_handler(UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , UpperCamelCase )
signal.signal(signal.SIGALRM , UpperCamelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def snake_case_ ():
'''simple docstring'''
_a = WriteOnlyStringIO()
with contextlib.redirect_stdout(UpperCamelCase ):
with contextlib.redirect_stderr(UpperCamelCase ):
with redirect_stdin(UpperCamelCase ):
yield
@contextlib.contextmanager
def snake_case_ ():
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(UpperCamelCase ):
yield dirname
class A ( _a ):
pass
class A ( io.StringIO ):
def __lowerCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[int] ) -> int:
"""simple docstring"""
raise OSError
def __lowerCAmelCase ( self : Optional[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
raise OSError
def __lowerCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
raise OSError
def __lowerCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
return False
class A ( contextlib._RedirectStream ): # type: ignore
lowercase_ = 'stdin'
@contextlib.contextmanager
def snake_case_ (UpperCamelCase : List[Any] ):
'''simple docstring'''
if root == ".":
yield
return
_a = os.getcwd()
os.chdir(UpperCamelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(UpperCamelCase )
def snake_case_ (UpperCamelCase : List[str]=None ):
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_a = None
_a = None
import os
_a = '''1'''
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
_a = None
import shutil
_a = None
_a = None
_a = None
import subprocess
_a = None # type: ignore
_a = None
import sys
_a = None
_a = None
_a = None
_a = None
_a = None
| 703
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_snake_case : str = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class A ( unittest.TestCase ,_a ):
def __lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_a = load_tool('''text-question-answering''' )
self.tool.setup()
_a = load_tool('''text-question-answering''' , remote=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_a = self.tool(lowerCAmelCase_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase_ , '''launched the BigScience Research Workshop''' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_a = self.remote_tool(lowerCAmelCase_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase_ , '''launched the BigScience Research Workshop''' )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_a = self.tool(text=lowerCAmelCase_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase_ , '''launched the BigScience Research Workshop''' )
def __lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_a = self.remote_tool(text=lowerCAmelCase_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase_ , '''launched the BigScience Research Workshop''' )
| 377
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case = None , snake_case = None , snake_case = True , snake_case = None , snake_case = False , snake_case = None , snake_case = True , snake_case = "arrow" , **snake_case , ):
super().__init__(
split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , **snake_case , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=snake_case , features=snake_case , cache_dir=snake_case , working_dir=snake_case , **snake_case , )
def SCREAMING_SNAKE_CASE__ ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=snake_case , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 84
|
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if digit_amount > 0:
return round(number - int(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
return number - int(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 84
| 1
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = hf_hub_url(repo_id=_SCREAMING_SNAKE_CASE , path=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE )
assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(_SCREAMING_SNAKE_CASE )}"""
| 702
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase : str = ""
else:
_UpperCAmelCase : Any = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
_UpperCAmelCase : str = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = dct.pop(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Any = val
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = ViTMSNConfig()
_UpperCAmelCase : int = 1_0_0_0
_UpperCAmelCase : str = "datasets/huggingface/label-files"
_UpperCAmelCase : int = "imagenet-1k-id2label.json"
_UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , "r" ) )
_UpperCAmelCase : Dict = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_UpperCAmelCase : List[Any] = idalabel
_UpperCAmelCase : Any = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_UpperCAmelCase : Union[str, Any] = 3_8_4
_UpperCAmelCase : Any = 1_5_3_6
_UpperCAmelCase : Optional[Any] = 6
elif "l16" in checkpoint_url:
_UpperCAmelCase : Optional[int] = 1_0_2_4
_UpperCAmelCase : Union[str, Any] = 4_0_9_6
_UpperCAmelCase : Any = 2_4
_UpperCAmelCase : Any = 1_6
_UpperCAmelCase : List[Any] = 0.1
elif "b4" in checkpoint_url:
_UpperCAmelCase : Optional[Any] = 4
elif "l7" in checkpoint_url:
_UpperCAmelCase : str = 7
_UpperCAmelCase : List[str] = 1_0_2_4
_UpperCAmelCase : int = 4_0_9_6
_UpperCAmelCase : List[str] = 2_4
_UpperCAmelCase : Any = 1_6
_UpperCAmelCase : Optional[int] = 0.1
_UpperCAmelCase : Any = ViTMSNModel(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[str] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="cpu" )["target_encoder"]
_UpperCAmelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = create_rename_keys(_SCREAMING_SNAKE_CASE , base_model=_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , base_model=_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Any = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
_UpperCAmelCase : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
_UpperCAmelCase : int = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_UpperCAmelCase : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
_UpperCAmelCase : Union[str, Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
_UpperCAmelCase : Dict = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
_UpperCAmelCase : Any = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
_UpperCAmelCase : List[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCamelCase = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 328
| 0
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _a ( )-> int:
SCREAMING_SNAKE_CASE_ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if not hasattr(lowerCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ = args.func(lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 360
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE: Any = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: List[Any] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE: List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 360
| 1
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: Any ) -> Optional[int]:
"""simple docstring"""
__a = []
for part_id in partition_order:
__a = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(SCREAMING_SNAKE_CASE__ ):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__a = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__a = spark.range(100 ).repartition(1 )
__a = Spark(SCREAMING_SNAKE_CASE__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__a = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__a = spark.range(10 ).repartition(2 )
__a = [1, 0]
__a = _generate_iterable_examples(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Reverse the partitions.
__a = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__a , __a = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
__a = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__a = spark.range(10 ).repartition(1 )
__a = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__a = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__a = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
__a = lambda SCREAMING_SNAKE_CASE__ : x.reverse()
__a = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__, [2, 1, 0] )
__a = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ ).shuffle_data_sources(SCREAMING_SNAKE_CASE__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
__a , __a = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
__a = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__a = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__a = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
__a = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__, [0, 2] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
__a , __a = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__a = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
__a = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__, [1, 3] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
__a , __a = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
__a = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__a = spark.range(100 ).repartition(1 )
__a = Spark(SCREAMING_SNAKE_CASE__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 711
|
'''simple docstring'''
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int, SCREAMING_SNAKE_CASE__: bool = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
__a = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
__a = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(SCREAMING_SNAKE_CASE__, 1 ):
if n < _p:
# then we have our last prime to check
__a = primes[:idx]
break
__a , __a = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__a = False
for r in range(SCREAMING_SNAKE_CASE__ ):
__a = pow(SCREAMING_SNAKE_CASE__, d * 2**r, SCREAMING_SNAKE_CASE__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__a = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 270
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Dict = (UniPCMultistepScheduler,)
__lowercase : Dict = (('num_inference_steps', 25),)
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:int ):
snake_case__ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:str , _a:Dict=0 , **_a:Tuple ):
snake_case__ = dict(self.forward_default_kwargs )
snake_case__ = kwargs.pop('''num_inference_steps''' , _a )
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config(**_a )
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
snake_case__ = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
snake_case__ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ , snake_case__ = sample, sample
for t in range(_a , time_step + scheduler.config.solver_order + 1 ):
snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
snake_case__ = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str=0 , **_a:List[str] ):
snake_case__ = dict(self.forward_default_kwargs )
snake_case__ = kwargs.pop('''num_inference_steps''' , _a )
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
snake_case__ = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
snake_case__ = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[Any]=None , **_a:List[str] ):
if scheduler is None:
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(**_a )
snake_case__ = scheduler_class(**_a )
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(**_a )
snake_case__ = scheduler_class(**_a )
snake_case__ = 10
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = dict(self.forward_default_kwargs )
snake_case__ = kwargs.pop('''num_inference_steps''' , _a )
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_a , '''set_timesteps''' ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a , '''set_timesteps''' ):
snake_case__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case__ = dummy_past_residuals[: scheduler.config.solver_order]
snake_case__ = scheduler.timesteps[5]
snake_case__ = scheduler.timesteps[6]
snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a , **_a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:int ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case__ = UniPCMultistepScheduler(**self.get_scheduler_config() )
snake_case__ = self.full_loop(scheduler=_a )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
snake_case__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case__ = DEISMultistepScheduler.from_config(scheduler.config )
snake_case__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case__ = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case__ = self.full_loop(scheduler=_a )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.check_over_configs(thresholding=_a )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , solver_order=_a , solver_type=_a , )
def SCREAMING_SNAKE_CASE__ ( self:int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_a , solver_type=_a , prediction_type=_a , )
snake_case__ = self.full_loop(
solver_order=_a , solver_type=_a , prediction_type=_a , )
assert not torch.isnan(_a ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self:Any ):
self.check_over_configs(lower_order_final=_a )
self.check_over_configs(lower_order_final=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=_a , time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.full_loop()
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.full_loop(prediction_type='''v_prediction''' )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 )
snake_case__ = scheduler_class(**_a )
snake_case__ = 10
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
assert sample.dtype == torch.floataa
def SCREAMING_SNAKE_CASE__ ( self:List[str] , **_a:List[str] ):
for scheduler_class in self.scheduler_classes:
snake_case__ = self.get_scheduler_config(**_a )
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 33
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , a , a ) -> Optional[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self , a = 1 , a = None , a = 0.0 , a = 50 , a = None , a = "pil" , a = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , a ):
snake_case_ = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
snake_case_ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(a )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
snake_case_ = randn_tensor(a , generator=a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case_ = self.unet(a , a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case_ = self.scheduler.step(
a , a , a , eta=a , use_clipped_model_output=a , generator=a ).prev_sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 198
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_UpperCamelCase = """Create a default config file for Accelerate with only a few flags set."""
def _a ( _snake_case="no" , _snake_case = default_json_config_file , _snake_case = False ):
"""simple docstring"""
UpperCAmelCase = Path(_snake_case )
path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
UpperCAmelCase = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase = torch.cuda.device_count()
UpperCAmelCase = num_gpus
UpperCAmelCase = False
if num_gpus > 1:
UpperCAmelCase = """MULTI_GPU"""
else:
UpperCAmelCase = """NO"""
elif is_xpu_available() and use_xpu:
UpperCAmelCase = torch.xpu.device_count()
UpperCAmelCase = num_xpus
UpperCAmelCase = False
if num_xpus > 1:
UpperCAmelCase = """MULTI_XPU"""
else:
UpperCAmelCase = """NO"""
elif is_npu_available():
UpperCAmelCase = torch.npu.device_count()
UpperCAmelCase = num_npus
UpperCAmelCase = False
if num_npus > 1:
UpperCAmelCase = """MULTI_NPU"""
else:
UpperCAmelCase = """NO"""
else:
UpperCAmelCase = 0
UpperCAmelCase = True
UpperCAmelCase = 1
UpperCAmelCase = """NO"""
UpperCAmelCase = ClusterConfig(**_snake_case )
config.to_json_file(_snake_case )
return path
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = parser.add_parser("""default""" , parents=_snake_case , help=_snake_case , formatter_class=_snake_case )
parser.add_argument(
"""--config_file""" , default=_snake_case , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=_snake_case , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=_snake_case )
return parser
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
| 74
|
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74
| 1
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__A : Optional[Any] = logging.get_logger(__name__)
# General docstring
__A : int = 'PoolFormerConfig'
# Base docstring
__A : str = 'sail/poolformer_s12'
__A : Any = [1, 5_1_2, 7, 7]
# Image classification docstring
__A : List[Any] = 'sail/poolformer_s12'
__A : str = 'tabby, tabby cat'
__A : Dict = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __a ( A__ : Optional[int] , A__ : float = 0.0 , A__ : bool = False ):
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE = 1 - drop_prob
SCREAMING_SNAKE_CASE = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
SCREAMING_SNAKE_CASE = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE = input.div(A__ ) * random_tensor
return output
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Optional[float] = None ):
super().__init__()
SCREAMING_SNAKE_CASE = drop_prob
def _snake_case ( self : int , __lowerCamelCase : torch.Tensor ):
return drop_path(__lowerCamelCase , self.drop_prob , self.training )
def _snake_case ( self : List[Any] ):
return "p={}".format(self.drop_prob )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict=None ):
super().__init__()
SCREAMING_SNAKE_CASE = patch_size if isinstance(__lowerCamelCase , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE = stride if isinstance(__lowerCamelCase , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE = padding if isinstance(__lowerCamelCase , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE = nn.Convad(__lowerCamelCase , __lowerCamelCase , kernel_size=__lowerCamelCase , stride=__lowerCamelCase , padding=__lowerCamelCase )
SCREAMING_SNAKE_CASE = norm_layer(__lowerCamelCase ) if norm_layer else nn.Identity()
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = self.projection(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.norm(__lowerCamelCase )
return embeddings
class _SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : Any , **__lowerCamelCase : List[str] ):
super().__init__(1 , __lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : Dict ):
super().__init__()
SCREAMING_SNAKE_CASE = nn.AvgPoolad(__lowerCamelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__lowerCamelCase )
def _snake_case ( self : int , __lowerCamelCase : int ):
return self.pool(__lowerCamelCase ) - hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ):
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 )
SCREAMING_SNAKE_CASE = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 )
SCREAMING_SNAKE_CASE = PoolFormerDropPath(__lowerCamelCase )
if isinstance(config.hidden_act , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE = config.hidden_act
def _snake_case ( self : Any , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = self.conva(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.act_fn(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.drop(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.conva(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.drop(__lowerCamelCase )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ):
super().__init__()
SCREAMING_SNAKE_CASE = PoolFormerPooling(__lowerCamelCase )
SCREAMING_SNAKE_CASE = PoolFormerOutput(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(__lowerCamelCase )
SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(__lowerCamelCase )
# Useful for training neural nets
SCREAMING_SNAKE_CASE = PoolFormerDropPath(__lowerCamelCase ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((__lowerCamelCase) ) , requires_grad=__lowerCamelCase )
SCREAMING_SNAKE_CASE = nn.Parameter(
config.layer_scale_init_value * torch.ones((__lowerCamelCase) ) , requires_grad=__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[int] ):
if self.use_layer_scale:
SCREAMING_SNAKE_CASE = self.pooling(self.before_norm(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(__lowerCamelCase )
SCREAMING_SNAKE_CASE = ()
SCREAMING_SNAKE_CASE = self.output(self.after_norm(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE = hidden_states + self.drop_path(__lowerCamelCase )
SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE = self.drop_path(self.pooling(self.before_norm(__lowerCamelCase ) ) )
# First residual connection
SCREAMING_SNAKE_CASE = pooling_output + hidden_states
SCREAMING_SNAKE_CASE = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE = self.drop_path(self.output(self.after_norm(__lowerCamelCase ) ) )
SCREAMING_SNAKE_CASE = hidden_states + layer_output
SCREAMING_SNAKE_CASE = (output,) + outputs
return outputs
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : Dict ):
super().__init__()
SCREAMING_SNAKE_CASE = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE = nn.ModuleList(__lowerCamelCase )
# Transformer blocks
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__lowerCamelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = nn.ModuleList(__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : List[Any]=True ):
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
SCREAMING_SNAKE_CASE = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE = embedding_layer(__lowerCamelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = blk(__lowerCamelCase )
SCREAMING_SNAKE_CASE = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase , hidden_states=__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = PoolFormerConfig
lowerCamelCase__ = "poolformer"
lowerCamelCase__ = "pixel_values"
lowerCamelCase__ = True
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Any ):
if isinstance(__lowerCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCamelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any=False ):
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = value
__A : Optional[int] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__A : Dict = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , __snake_case , )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : str ):
super().__init__(__lowerCamelCase )
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = PoolFormerEncoder(__lowerCamelCase )
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self : Any ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
SCREAMING_SNAKE_CASE = self.encoder(
__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , )
SCREAMING_SNAKE_CASE = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCamelCase , hidden_states=encoder_outputs.hidden_states , )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : List[str] ):
super().__init__()
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self : Tuple , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = self.dense(__lowerCamelCase )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , __snake_case , )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : List[str] ):
super().__init__(__lowerCamelCase )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = PoolFormerModel(__lowerCamelCase )
# Final norm
SCREAMING_SNAKE_CASE = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[torch.LongTensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.poolformer(
__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , )
SCREAMING_SNAKE_CASE = outputs[0]
SCREAMING_SNAKE_CASE = self.classifier(self.norm(__lowerCamelCase ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE = "single_label_classification"
else:
SCREAMING_SNAKE_CASE = "multi_label_classification"
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE = loss_fct(__lowerCamelCase , __lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE = loss_fct(__lowerCamelCase , __lowerCamelCase )
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states )
| 16
|
from __future__ import annotations
def __a ( A__ : list[int | str] ):
create_state_space_tree(A__ , [] , 0 , [0 for i in range(len(A__ ) )] )
def __a ( A__ : list[int | str] , A__ : list[int | str] , A__ : int , A__ : list[int] , ):
if index == len(A__ ):
print(A__ )
return
for i in range(len(A__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
SCREAMING_SNAKE_CASE = True
create_state_space_tree(A__ , A__ , index + 1 , A__ )
current_sequence.pop()
SCREAMING_SNAKE_CASE = False
__A : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __magic_name__:
def __init__( self : Optional[int] , __UpperCamelCase : Collection[float] | None = None ):
'''simple docstring'''
if components is None:
snake_case__ = []
snake_case__ = list(__UpperCamelCase )
def __len__( self : int ):
'''simple docstring'''
return len(self.__components )
def __str__( self : Dict ):
'''simple docstring'''
return "(" + ",".join(map(__UpperCamelCase , self.__components ) ) + ")"
def __add__( self : str , __UpperCamelCase : Vector ):
'''simple docstring'''
snake_case__ = len(self )
if size == len(__UpperCamelCase ):
snake_case__ = [self.__components[i] + other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )]
return Vector(__UpperCamelCase )
else:
raise Exception("""must have the same size""" )
def __sub__( self : List[Any] , __UpperCamelCase : Vector ):
'''simple docstring'''
snake_case__ = len(self )
if size == len(__UpperCamelCase ):
snake_case__ = [self.__components[i] - other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )]
return Vector(__UpperCamelCase )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self : List[str] , __UpperCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : Any , __UpperCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : Optional[Any] , __UpperCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(__UpperCamelCase , (float, int) ):
snake_case__ = [c * other for c in self.__components]
return Vector(__UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and len(self ) == len(__UpperCamelCase ):
snake_case__ = len(self )
snake_case__ = [self.__components[i] * other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )]
return sum(__UpperCamelCase )
else: # error case
raise Exception("""invalid operand!""" )
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
return Vector(self.__components )
def __lowerCAmelCase( self : Union[str, Any] , __UpperCamelCase : int ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __lowerCAmelCase( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : float ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
snake_case__ = value
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
snake_case__ = [c**2 for c in self.__components]
return math.sqrt(sum(__UpperCamelCase ) )
def __lowerCAmelCase( self : str , __UpperCamelCase : Vector , __UpperCamelCase : bool = False ):
'''simple docstring'''
snake_case__ = self * other
snake_case__ = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def snake_case__ ( a ) -> Vector:
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return Vector([0] * dimension )
def snake_case__ ( a , a ) -> Vector:
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ))
snake_case__ = [0] * dimension
snake_case__ = 1
return Vector(_SCREAMING_SNAKE_CASE )
def snake_case__ ( a , a , a ) -> Vector:
'''simple docstring'''
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (isinstance(_SCREAMING_SNAKE_CASE , (int, float) ))
)
return x * scalar + y
def snake_case__ ( a , a , a ) -> Vector:
'''simple docstring'''
random.seed(_SCREAMING_SNAKE_CASE )
snake_case__ = [random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )]
return Vector(_SCREAMING_SNAKE_CASE )
class __magic_name__:
def __init__( self : str , __UpperCamelCase : list[list[float]] , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case__ = matrix
snake_case__ = w
snake_case__ = h
def __str__( self : List[Any] ):
'''simple docstring'''
snake_case__ = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , __UpperCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
snake_case__ = []
for i in range(self.__height ):
snake_case__ = [
self.__matrix[i][j] + other.component(__UpperCamelCase , __UpperCamelCase )
for j in range(self.__width )
]
matrix.append(__UpperCamelCase )
return Matrix(__UpperCamelCase , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self : Optional[Any] , __UpperCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
snake_case__ = []
for i in range(self.__height ):
snake_case__ = [
self.__matrix[i][j] - other.component(__UpperCamelCase , __UpperCamelCase )
for j in range(self.__width )
]
matrix.append(__UpperCamelCase )
return Matrix(__UpperCamelCase , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : Dict , __UpperCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : str , __UpperCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : int , __UpperCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ): # matrix-vector
if len(__UpperCamelCase ) == self.__width:
snake_case__ = zero_vector(self.__height )
for i in range(self.__height ):
snake_case__ = [
self.__matrix[i][j] * other.component(__UpperCamelCase )
for j in range(self.__width )
]
ans.change_component(__UpperCamelCase , sum(__UpperCamelCase ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(__UpperCamelCase , (int, float) ): # matrix-scalar
snake_case__ = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__UpperCamelCase , self.__width , self.__height )
return None
def __lowerCAmelCase( self : Tuple ):
'''simple docstring'''
return self.__height
def __lowerCAmelCase( self : int ):
'''simple docstring'''
return self.__width
def __lowerCAmelCase( self : str , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __lowerCAmelCase( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
snake_case__ = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __lowerCAmelCase( self : Any , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
snake_case__ = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__UpperCamelCase ) ):
snake_case__ = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__UpperCamelCase , self.__width - 1 , self.__height - 1 ).determinant()
def __lowerCAmelCase( self : Dict , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__UpperCamelCase , __UpperCamelCase )
else:
raise Exception("""Indices out of bounds""" )
def __lowerCAmelCase( self : List[Any] ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
snake_case__ = [
self.__matrix[0][y] * self.cofactor(0 , __UpperCamelCase ) for y in range(self.__width )
]
return sum(__UpperCamelCase )
def snake_case__ ( a ) -> Matrix:
'''simple docstring'''
snake_case__ = [[0] * n for _ in range(_SCREAMING_SNAKE_CASE )]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def snake_case__ ( a , a , a , a ) -> Matrix:
'''simple docstring'''
random.seed(_SCREAMING_SNAKE_CASE )
snake_case__ = [
[random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )
]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 701
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def snake_case__ ( a , a , a , a , a ) -> Optional[int]:
'''simple docstring'''
snake_case__ = StableDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
snake_case__ = load_file(a )
snake_case__ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
snake_case__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
snake_case__ = pipeline.text_encoder
else:
snake_case__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
snake_case__ = pipeline.unet
# find the target layer
snake_case__ = layer_infos.pop(0 )
while len(a ) > -1:
try:
snake_case__ = curr_layer.__getattr__(a )
if len(a ) > 0:
snake_case__ = layer_infos.pop(0 )
elif len(a ) == 0:
break
except Exception:
if len(a ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
snake_case__ = layer_infos.pop(0 )
snake_case__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(a )
else:
pair_keys.append(a )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
snake_case__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
snake_case__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a , a ).unsqueeze(2 ).unsqueeze(3 )
else:
snake_case__ = state_dict[pair_keys[0]].to(torch.floataa )
snake_case__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a , a )
# update visited list
for item in pair_keys:
visited.append(a )
return pipeline
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
a__ = parser.parse_args()
a__ = args.base_model_path
a__ = args.checkpoint_path
a__ = args.dump_path
a__ = args.lora_prefix_unet
a__ = args.lora_prefix_text_encoder
a__ = args.alpha
a__ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
a__ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 566
| 0
|
'''simple docstring'''
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = [int(UpperCamelCase ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
return len(UpperCamelCase ) == 4 and all(0 <= int(UpperCamelCase ) <= 254 for octet in octets )
if __name__ == "__main__":
_snake_case : Optional[Any] = input().strip()
_snake_case : Any = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 22
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : str = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
_snake_case : Tuple = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 22
| 1
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( __lowerCamelCase ):
'''simple docstring'''
__snake_case = ["image_processor", "tokenizer"]
__snake_case = "FlavaImageProcessor"
__snake_case = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase_ , )
snake_case_ = kwargs.pop('''feature_extractor''' )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = self.image_processor
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
snake_case_ = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
if images is not None:
snake_case_ = self.image_processor(
UpperCAmelCase_ , return_image_mask=UpperCAmelCase_ , return_codebook_pixels=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
if text is not None and images is not None:
encoding.update(UpperCAmelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def UpperCamelCase__ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase__ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
return self.image_processor
| 706
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase__ ( self ):
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = '''A red cat sitting on a park bench'''
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images
snake_case_ = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case_ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = '''A red cat sitting on a park bench'''
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images
snake_case_ = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case_ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 531
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase : Dict = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__UpperCamelCase : str = {'''facebook/blenderbot-3B''': 128}
class a ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
snake_case__ = BlenderbotTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="replace" , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case=False , _snake_case=True , **_snake_case , ):
"""simple docstring"""
super().__init__(
_snake_case , _snake_case , tokenizer_file=_snake_case , errors=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case , **_snake_case , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _snake_case ) != add_prefix_space:
lowerCAmelCase = getattr(_snake_case , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**_snake_case )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , _snake_case , _snake_case )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , _snake_case ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , _snake_case ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(_snake_case , state.pop('type' ) )
lowerCAmelCase = component_class(**_snake_case )
setattr(self.backend_tokenizer , _snake_case , _snake_case )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else value
lowerCAmelCase = value
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = kwargs.get('is_split_into_words' , _snake_case )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_snake_case , **_snake_case )
def UpperCamelCase__ ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
lowerCAmelCase = kwargs.get('is_split_into_words' , _snake_case )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_snake_case , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_snake_case )
lowerCAmelCase = ' '.join(_snake_case )
lowerCAmelCase = self.encode(_snake_case )
if len(_snake_case ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 4
|
from heapq import heappop, heappush
import numpy as np
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
__lowercase , __lowercase = grid.shape
__lowercase = [-1, 1, 0, 0]
__lowercase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__lowercase , __lowercase = [(0, source)], set()
__lowercase = np.full((rows, cols) , np.inf )
__lowercase = 0
__lowercase = np.empty((rows, cols) , dtype=_UpperCAmelCase )
__lowercase = None
while queue:
((__lowercase) , (__lowercase)) = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__lowercase = []
while (x, y) != source:
path.append((x, y) )
__lowercase , __lowercase = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
__lowercase , __lowercase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__lowercase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase , (dist + 1, (nx, ny)) )
__lowercase = dist + 1
__lowercase = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321
| 0
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] =False, False, False
@dataclass
class _a :
_UpperCamelCase: Optional[int] = None
_UpperCamelCase: bool = True
_UpperCamelCase: bool = True
_UpperCamelCase: Optional[str] = None
# Automatically constructed
_UpperCamelCase: ClassVar[str] = "dict"
_UpperCamelCase: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_UpperCamelCase: str = field(default="Audio" , init=snake_case_ , repr=snake_case_ )
def __call__( self ) -> List[str]:
return self.pa_type
def _snake_case ( self , lowercase_ ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(lowercase_ , lowercase_ ):
return {"bytes": None, "path": value}
elif isinstance(lowercase_ , lowercase_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCAmelCase : Dict = BytesIO()
sf.write(lowercase_ , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCAmelCase : List[str] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
lowerCAmelCase : Dict = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32767
lowerCAmelCase : List[str] = BytesIO(bytes() )
sf.write(lowercase_ , lowercase_ , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCAmelCase , lowerCAmelCase : Tuple = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCAmelCase : List[str] = xsplitext(lowercase_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCAmelCase : Optional[Any] = token_per_repo_id or {}
lowerCAmelCase : Dict = path.split("""::""" )[-1]
try:
lowerCAmelCase : List[Any] = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCAmelCase : int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCAmelCase : Dict = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = sf.read(lowercase_ )
else:
lowerCAmelCase , lowerCAmelCase : List[str] = sf.read(lowercase_ )
lowerCAmelCase : List[Any] = array.T
if self.mono:
lowerCAmelCase : Tuple = librosa.to_mono(lowercase_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCAmelCase : int = librosa.resample(lowercase_ , orig_sr=lowercase_ , target_sr=self.sampling_rate )
lowerCAmelCase : List[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _snake_case ( self , lowercase_ ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowerCAmelCase : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowerCAmelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : int = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCAmelCase : Dict = pa.array([Audio().encode_example(lowercase_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCAmelCase : Any = storage.field("""bytes""" )
else:
lowerCAmelCase : Optional[int] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCAmelCase : List[Any] = storage.field("""path""" )
else:
lowerCAmelCase : Optional[int] = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowerCAmelCase : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowercase_ , self.pa_type )
def _snake_case ( self , lowercase_ ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase_ ):
with xopen(lowercase_ , """rb""" ) as f:
lowerCAmelCase : Dict = f.read()
return bytes_
lowerCAmelCase : Optional[Any] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase : Any = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
| 693
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693
| 1
|
import collections
import os
import re
from pathlib import Path
A_ : List[str] = 'src/transformers'
# Matches is_xxx_available()
A_ : Any = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
A_ : Optional[int] = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A_ : Dict = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
A_ : Dict = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
A_ : Tuple = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A_ : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
A_ : Dict = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
A_ : Tuple = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
A_ : Union[str, Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
A_ : Any = re.compile(r'^\s*try:')
# Catches a line with else:
A_ : Optional[Any] = re.compile(r'^\s*else:')
def UpperCamelCase (lowercase_: Optional[Any] ) -> Any:
if _re_test_backend.search(lowercase_ ) is None:
return None
A__ : Optional[int] = [b[0] for b in _re_backend.findall(lowercase_ )]
backends.sort()
return "_and_".join(lowercase_ )
def UpperCamelCase (lowercase_: Any ) -> Dict:
with open(lowercase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A__ : Optional[Any] = f.readlines()
A__ : Optional[Any] = 0
while line_index < len(lowercase_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase_ ):
return None
# First grab the objects without a specific backend in _import_structure
A__ : List[Any] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
A__ : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase_ ):
A__ : str = _re_one_line_import_struct.search(lowercase_ ).groups()[0]
A__ : Union[str, Any] = re.findall(r"""\[([^\]]+)\]""" , lowercase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
A__ : int = _re_import_struct_key_value.search(lowercase_ )
if single_line_import_search is not None:
A__ : int = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowercase_ ) > 0]
objects.extend(lowercase_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
A__ : str = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ : Optional[int] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
A__ : Any = lines[line_index]
if _re_import_struct_add_one.search(lowercase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase_ ) is not None:
A__ : Any = _re_import_struct_add_many.search(lowercase_ ).groups()[0].split(""", """ )
A__ : int = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0]
objects.extend(lowercase_ )
elif _re_between_brackets.search(lowercase_ ) is not None:
A__ : Any = _re_between_brackets.search(lowercase_ ).groups()[0].split(""", """ )
A__ : Any = [obj[1:-1] for obj in imports if len(lowercase_ ) > 0]
objects.extend(lowercase_ )
elif _re_quote_object.search(lowercase_ ) is not None:
objects.append(_re_quote_object.search(lowercase_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
A__ : List[str] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ : Any = []
while (
line_index < len(lowercase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
A__ : Dict = lines[line_index]
A__ : Any = _re_import.search(lowercase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ : List[str] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
A__ : Union[str, Any] = lines[line_index]
A__ : List[str] = _re_import.search(lowercase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
A__ : Tuple = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: Union[str, Any] ) -> List[Any]:
def find_duplicates(lowercase_: Tuple ):
return [k for k, v in collections.Counter(lowercase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ : str = []
for key in import_dict_objects.keys():
A__ : Optional[int] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
A__ : Tuple = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ : Tuple = """base imports""" if key == """none""" else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def UpperCamelCase () -> str:
A__ : str = []
for root, _, files in os.walk(lowercase_ ):
if "__init__.py" in files:
A__ : Tuple = os.path.join(lowercase_ , """__init__.py""" )
A__ : Union[str, Any] = parse_init(lowercase_ )
if objects is not None:
A__ : List[Any] = analyze_results(*lowercase_ )
if len(lowercase_ ) > 0:
A__ : int = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("""\n""".join(lowercase_ ) )
if len(lowercase_ ) > 0:
raise ValueError("""\n\n""".join(lowercase_ ) )
def UpperCamelCase () -> Dict:
A__ : int = []
for path, directories, files in os.walk(lowercase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowercase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase_ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
A__ : Union[str, Any] = str((Path(lowercase_ ) / folder).relative_to(lowercase_ ) )
A__ : List[str] = short_path.replace(os.path.sep , """.""" )
submodules.append(lowercase_ )
for fname in files:
if fname == "__init__.py":
continue
A__ : Any = str((Path(lowercase_ ) / fname).relative_to(lowercase_ ) )
A__ : Union[str, Any] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowercase_ )
return submodules
A_ : str = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def UpperCamelCase () -> str:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
A__ : Any = direct_transformers_import(lowercase_ )
A__ : List[str] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(lowercase_ , """__init__.py""" ) , """r""" ) as f:
A__ : str = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , lowercase_ ) ) )
A__ : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowercase_ ) > 0:
A__ : Dict = """\n""".join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f"""{list_of_modules}\n"""
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 456
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
A_ : Any = logging.get_logger(__name__)
class _a :
'''simple docstring'''
UpperCAmelCase__: str = None
@experimental
def UpperCamelCase (lowercase_: int , lowercase_: Any , lowercase_: str , lowercase_: str , lowercase_: List[str] , lowercase_: List[Any] , lowercase_: Union[str, Any] ) -> Union[str, Any]:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return _map_with_joblib(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: Tuple , lowercase_: List[Any] , lowercase_: Tuple , lowercase_: Tuple , lowercase_: List[str] , lowercase_: Dict , lowercase_: Tuple ) -> Tuple:
A__ : Union[str, Any] = num_proc if num_proc <= len(lowercase_ ) else len(lowercase_ )
A__ : str = [] # We organize the splits ourselve (contiguous splits)
for index in range(lowercase_ ):
A__ : Tuple = len(lowercase_ ) // num_proc
A__ : Tuple = len(lowercase_ ) % num_proc
A__ : Optional[Any] = div * index + min(lowercase_ , lowercase_ )
A__ : Union[str, Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowercase_ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(lowercase_ )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(lowercase_ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
A__ , A__ : Optional[int] = None, None
if not disable_tqdm:
A__ , A__ : List[str] = (RLock(),), tqdm.set_lock
with Pool(lowercase_ , initargs=lowercase_ , initializer=lowercase_ ) as pool:
A__ : Optional[Any] = pool.map(lowercase_ , lowercase_ )
logger.info(f"""Finished {num_proc} processes""" )
A__ : Tuple = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(lowercase_ )} objects""" )
return mapped
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: str , lowercase_: Union[str, Any] , lowercase_: List[Any] , lowercase_: Dict , lowercase_: Optional[Any] ) -> Union[str, Any]:
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowercase_ ):
return joblib.Parallel()(
joblib.delayed(lowercase_ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def UpperCamelCase (lowercase_: str ) -> str:
A__ : int = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
A__ : Optional[Any] = None
| 456
| 1
|
import os
import sys
import unittest
__UpperCamelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__UpperCamelCase : Optional[int] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
__UpperCamelCase : List[Any] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Tuple = get_test_to_tester_mapping(_lowerCamelCase )
__lowerCamelCase : Optional[int] = get_test_to_tester_mapping(_lowerCamelCase )
__lowerCamelCase : Dict = {"""BertModelTest""": """BertModelTester"""}
__lowerCamelCase : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(get_test_info.to_json(_lowerCamelCase ) , _lowerCamelCase )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : List[str] = get_model_to_test_mapping(_lowerCamelCase )
__lowerCamelCase : Optional[int] = get_model_to_test_mapping(_lowerCamelCase )
__lowerCamelCase : Optional[int] = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
__lowerCamelCase : Optional[Any] = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(get_test_info.to_json(_lowerCamelCase ) , _lowerCamelCase )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : List[str] = get_model_to_tester_mapping(_lowerCamelCase )
__lowerCamelCase : Optional[int] = get_model_to_tester_mapping(_lowerCamelCase )
__lowerCamelCase : int = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
__lowerCamelCase : Optional[Any] = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(get_test_info.to_json(_lowerCamelCase ) , _lowerCamelCase )
| 458
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( A,unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] = ReformerTokenizer
a_ : Optional[int] = ReformerTokenizerFast
a_ : Optional[int] = True
a_ : List[Any] = False
a_ : Any = True
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
__lowerCamelCase : str = ReformerTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : List[str] = """<s>"""
__lowerCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(_lowerCamelCase ) , 1_0_0_0 )
def _snake_case ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def _snake_case ( self : Tuple ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCamelCase : Dict = self.get_tokenizer()
__lowerCamelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCamelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
__lowerCamelCase : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
__lowerCamelCase : Optional[int] = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : str = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
__lowerCamelCase : Tuple = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : str = self.get_rust_tokenizer()
__lowerCamelCase : Dict = tokenizer.encode(_lowerCamelCase )
__lowerCamelCase : Dict = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _snake_case ( self : Any , _lowerCamelCase : int=1_5 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
# Simple input
__lowerCamelCase : Any = """This is a simple input"""
__lowerCamelCase : Optional[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowerCamelCase : Union[str, Any] = ("""This is a simple input""", """This is a pair""")
__lowerCamelCase : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , )
def _snake_case ( self : List[str] ):
'''simple docstring'''
pass
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Any = ReformerTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
__lowerCamelCase : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
__lowerCamelCase : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
__lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self : List[str] ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase : Dict = """Hello World!"""
__lowerCamelCase : Any = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__lowerCamelCase : List[str] = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
__lowerCamelCase : int = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
__lowerCamelCase : Union[str, Any] = """ """.join(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors="""pt""" )
__lowerCamelCase : Union[str, Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
__lowerCamelCase : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
__lowerCamelCase : List[str] = encoded_sequence["""input_ids"""].shape
__lowerCamelCase : Optional[Any] = ReformerModel(_lowerCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase )
model(**_lowerCamelCase )
@slow
def _snake_case ( self : List[Any] ):
'''simple docstring'''
__lowerCamelCase : Tuple = {"""input_ids""": [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
__lowerCamelCase : int = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=_lowerCamelCase , sequences=_lowerCamelCase , )
| 458
| 1
|
def __lowercase ( __lowerCAmelCase : list ):
if len(__lowerCAmelCase ) <= 1:
return [tuple(__lowerCAmelCase )]
a__ = []
def generate(__lowerCAmelCase : int , __lowerCAmelCase : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __lowerCAmelCase )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
a__ , a__ = arr[k - 1], arr[i]
else: # k is odd
a__ , a__ = arr[k - 1], arr[0]
generate(k - 1 , __lowerCAmelCase )
generate(len(__lowerCAmelCase ) , __lowerCAmelCase )
return res
if __name__ == "__main__":
snake_case : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case : Dict = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 335
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[str] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''lxmert'''
UpperCAmelCase__ : Any = {}
def __init__( self :Dict ,__snake_case :Optional[Any]=3_05_22 ,__snake_case :int=7_68 ,__snake_case :int=12 ,__snake_case :Any=95_00 ,__snake_case :Union[str, Any]=16_00 ,__snake_case :str=4_00 ,__snake_case :Optional[Any]=30_72 ,__snake_case :List[str]="gelu" ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Dict=5_12 ,__snake_case :str=2 ,__snake_case :List[str]=0.02 ,__snake_case :Optional[int]=1E-12 ,__snake_case :Any=9 ,__snake_case :List[str]=5 ,__snake_case :Optional[Any]=5 ,__snake_case :str=20_48 ,__snake_case :Optional[Any]=4 ,__snake_case :str=6.67 ,__snake_case :Union[str, Any]=True ,__snake_case :str=True ,__snake_case :int=True ,__snake_case :List[str]=True ,__snake_case :List[Any]=True ,__snake_case :str=True ,__snake_case :List[str]=True ,**__snake_case :Optional[Any] ,) -> str:
a__ = vocab_size
a__ = hidden_size
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = initializer_range
a__ = layer_norm_eps
a__ = num_qa_labels
a__ = num_object_labels
a__ = num_attr_labels
a__ = l_layers
a__ = x_layers
a__ = r_layers
a__ = visual_feat_dim
a__ = visual_pos_dim
a__ = visual_loss_normalizer
a__ = task_matched
a__ = task_mask_lm
a__ = task_obj_predict
a__ = task_qa
a__ = visual_obj_loss
a__ = visual_attr_loss
a__ = visual_feat_loss
a__ = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**__snake_case )
| 335
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
__lowerCAmelCase = {
'yjernite/retribert-base-uncased': 5_1_2,
}
__lowerCAmelCase = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Optional[Any] = RetriBertTokenizer
__UpperCAmelCase : Any = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]=True ,_a : Dict="[UNK]" ,_a : str="[SEP]" ,_a : Union[str, Any]="[PAD]" ,_a : Dict="[CLS]" ,_a : Optional[Any]="[MASK]" ,_a : Optional[int]=True ,_a : Optional[int]=None ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
UpperCAmelCase__ ,tokenizer_file=UpperCAmelCase__ ,do_lower_case=UpperCAmelCase__ ,unk_token=UpperCAmelCase__ ,sep_token=UpperCAmelCase__ ,pad_token=UpperCAmelCase__ ,cls_token=UpperCAmelCase__ ,mask_token=UpperCAmelCase__ ,tokenize_chinese_chars=UpperCAmelCase__ ,strip_accents=UpperCAmelCase__ ,**UpperCAmelCase__ ,)
_a : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' ,UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,UpperCAmelCase__ ) != tokenize_chinese_chars
):
_a : int = getattr(UpperCAmelCase__ ,normalizer_state.pop('type' ) )
_a : List[Any] = do_lower_case
_a : Union[str, Any] = strip_accents
_a : int = tokenize_chinese_chars
_a : int = normalizer_class(**UpperCAmelCase__ )
_a : Union[str, Any] = do_lower_case
def __lowercase ( self : Dict ,_a : Tuple ,_a : Union[str, Any]=None ):
'''simple docstring'''
_a : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[Any] = [self.sep_token_id]
_a : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : str ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : Tuple = self._tokenizer.model.save(UpperCAmelCase__ ,name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 718
|
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[Any] = inspect.getfile(accelerate.test_utils )
_a : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_a : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Tuple = F"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
_a : Tuple = [sys.executable] + distributed_args
execute_subprocess_async(_a ,env=os.environ.copy() )
| 319
| 0
|
'''simple docstring'''
import math
def __UpperCAmelCase ( A : Tuple ) -> str:
UpperCAmelCase_ : int = [True] * n
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Any = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
UpperCAmelCase_ : int = i * 2
while index < n:
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Optional[int] = index + i
UpperCAmelCase_ : Dict = [2]
for i in range(3 , __SCREAMING_SNAKE_CASE , 2 ):
if is_prime[i]:
primes.append(__SCREAMING_SNAKE_CASE )
return primes
def __UpperCAmelCase ( A : Tuple = 9_9_9_9_6_6_6_6_3_3_3_3 ) -> List[str]:
UpperCAmelCase_ : str = math.floor(math.sqrt(__SCREAMING_SNAKE_CASE ) ) + 1_0_0
UpperCAmelCase_ : str = prime_sieve(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : int = primes[prime_index]
while (last_prime**2) <= limit:
UpperCAmelCase_ : Tuple = primes[prime_index + 1]
UpperCAmelCase_ : List[str] = last_prime**2
UpperCAmelCase_ : Dict = next_prime**2
# Get numbers divisible by lps(current)
UpperCAmelCase_ : str = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
UpperCAmelCase_ : str = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
UpperCAmelCase_ : List[Any] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
UpperCAmelCase_ : Optional[int] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 541
|
import collections
import os
import re
from pathlib import Path
UpperCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
UpperCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
UpperCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
UpperCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
UpperCAmelCase = re.compile(R'''^\s*else:''')
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if _re_test_backend.search(__SCREAMING_SNAKE_CASE ) is None:
return None
lowercase = [b[0] for b in _re_backend.findall(__SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase = f.readlines()
lowercase = 0
while line_index < len(__SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ):
lowercase = _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ).groups()[0]
lowercase = re.findall(r'\[([^\]]+)\]' , __SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowercase = _re_import_struct_key_value.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowercase = lines[line_index]
if _re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase = _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase = _re_between_brackets.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase = []
while (
line_index < len(__SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowercase = lines[line_index]
lowercase = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(__SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowercase = lines[line_index]
lowercase = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
def find_duplicates(__SCREAMING_SNAKE_CASE ):
return [k for k, v in collections.Counter(__SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase = []
for key in import_dict_objects.keys():
lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def UpperCAmelCase_ ( ):
lowercase = []
for root, _, files in os.walk(__SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' )
lowercase = parse_init(__SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase = analyze_results(*__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(__SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase_ ( ):
lowercase = []
for path, directories, files in os.walk(__SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(__SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
lowercase = str((Path(__SCREAMING_SNAKE_CASE ) / folder).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(__SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase = str((Path(__SCREAMING_SNAKE_CASE ) / fname).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(__SCREAMING_SNAKE_CASE )
return submodules
UpperCAmelCase = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def UpperCAmelCase_ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
lowercase = direct_transformers_import(__SCREAMING_SNAKE_CASE )
lowercase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' ) as f:
lowercase = f.read()
import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]' , __SCREAMING_SNAKE_CASE ) ) )
lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registed in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 84
| 0
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ = logging.getLogger(__name__)
a_ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A_:
"""simple docstring"""
a_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
a_ : int = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(SCREAMING_SNAKE_CASE__ )} , )
a_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ : Optional[Any] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class A_:
"""simple docstring"""
a_ : List[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """The input training data file (a text file)."""} )
a_ : Dict = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
a_ : str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a_ : List[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
a_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
a_ : Union[str, Any] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
a_ : str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
a_ : int = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Whether ot not to use whole word mask."""} )
a_ : Dict = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a_ : List[Any] = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
a_ : List[Any] = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
a_ : List[Any] = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
a_ : str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def UpperCAmelCase_ ( __a : Any , __a : List[Any] , __a : Tuple = False , __a : Tuple = None , ):
'''simple docstring'''
def _dataset(__a : Optional[Any] , __a : Optional[int]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=__a , file_path=__a , block_size=args.block_size , ref_path=__a , )
return LineByLineTextDataset(tokenizer=__a , file_path=__a , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__a , file_path=__a , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__a , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__a ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def UpperCAmelCase_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_lowerCamelCase : List[str] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
_lowerCamelCase : Dict = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
_lowerCamelCase : List[str] = AutoModelWithLMHead.from_config(__a )
model.resize_token_embeddings(len(__a ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
_lowerCamelCase : Optional[int] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_lowerCamelCase : Dict = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_lowerCamelCase : Tuple = (
get_dataset(__a , tokenizer=__a , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_lowerCamelCase : List[Any] = (
get_dataset(__a , tokenizer=__a , evaluate=__a , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_lowerCamelCase : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=__a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_lowerCamelCase : Any = DataCollatorForWholeWordMask(
tokenizer=__a , mlm_probability=data_args.mlm_probability )
else:
_lowerCamelCase : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=__a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_lowerCamelCase : Tuple = Trainer(
model=__a , args=__a , data_collator=__a , train_dataset=__a , eval_dataset=__a , prediction_loss_only=__a , )
# Training
if training_args.do_train:
_lowerCamelCase : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__a )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase : Any = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCamelCase : List[str] = trainer.evaluate()
_lowerCamelCase : List[Any] = math.exp(eval_output['eval_loss'] )
_lowerCamelCase : str = {"perplexity": perplexity}
_lowerCamelCase : Any = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(__a , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __a , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(__a )
return results
def UpperCAmelCase_ ( __a : int ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 719
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class A_(unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=A , )
assert hasattr(self , 'env' )
def _lowerCAmelCase ( self , A ):
_lowerCamelCase : List[str] = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_lowerCamelCase : Any = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A , py_version='py36' , )
def _lowerCAmelCase ( self , A ):
TrainingJobAnalytics(A ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def _lowerCAmelCase ( self , A ):
# create estimator
_lowerCamelCase : List[Any] = self.create_estimator(A )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowerCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , A )
| 349
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
A_: Tuple = None
A_: Union[str, Any] = logging.get_logger(__name__)
A_: Optional[int] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
A_: List[Any] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
A_: Union[str, Any] = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
A_: Union[str, Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = NllbTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
_lowercase = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
_lowercase = vocab_file
_lowercase = False if not self.vocab_file else True
_lowercase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_lowercase = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowercase = src_lang if src_lang is not None else """eng_Latn"""
_lowercase = self.convert_tokens_to_ids(self._src_lang )
_lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
_lowercase = [self.sep_token_id]
_lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowercase = src_lang
_lowercase = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
_lowercase = self.convert_tokens_to_ids(UpperCAmelCase )
_lowercase = tgt_lang_id
return inputs
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = "eng_Latn" , UpperCAmelCase = None , UpperCAmelCase = "fra_Latn" , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = src_lang
_lowercase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
_lowercase = []
_lowercase = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase = [self.cur_lang_code]
_lowercase = [self.eos_token_id]
_lowercase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowercase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowercase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
_lowercase = []
_lowercase = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase = [self.cur_lang_code]
_lowercase = [self.eos_token_id]
_lowercase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowercase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowercase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
_lowercase = os.path.join(
UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 398
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_: Tuple = logging.get_logger(__name__)
A_: str = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower_vision_model'
def __init__( self , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=288 , UpperCAmelCase=1 , UpperCAmelCase=1e-05 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase )
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_channels
_lowercase = patch_size
_lowercase = image_size
_lowercase = initializer_factor
_lowercase = layer_norm_eps
_lowercase = stop_gradient
_lowercase = share_layernorm
_lowercase = remove_last_layer
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
_lowercase , _lowercase = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower_text_model'
def __init__( self , UpperCAmelCase=50265 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=1 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=514 , UpperCAmelCase=1 , UpperCAmelCase=1e-05 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase="absolute" , UpperCAmelCase=True , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = initializer_factor
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = position_embedding_type
_lowercase = use_cache
_lowercase = pad_token_id
_lowercase = bos_token_id
_lowercase = eos_token_id
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
_lowercase , _lowercase = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower'
def __init__( self , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=768 , UpperCAmelCase=1 , UpperCAmelCase=1e-05 , UpperCAmelCase=False , UpperCAmelCase="add" , UpperCAmelCase=12 , UpperCAmelCase=6 , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = kwargs.pop("""text_config_dict""" , UpperCAmelCase )
_lowercase = kwargs.pop("""vision_config_dict""" , UpperCAmelCase )
super().__init__(**UpperCAmelCase )
_lowercase = share_cross_modal_transformer_layers
_lowercase = hidden_act
_lowercase = hidden_size
_lowercase = initializer_factor
_lowercase = layer_norm_eps
_lowercase = share_link_tower_layers
_lowercase = link_tower_type
_lowercase = num_attention_heads
_lowercase = num_hidden_layers
_lowercase = tie_word_embeddings
_lowercase = init_layernorm_from_vision_encoder
if text_config is None:
_lowercase = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_lowercase = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_lowercase = BridgeTowerTextConfig(**UpperCAmelCase )
_lowercase = BridgeTowerVisionConfig(**UpperCAmelCase )
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.text_config.to_dict()
_lowercase = self.vision_config.to_dict()
_lowercase = self.__class__.model_type
return output
| 398
| 1
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
UpperCAmelCase__ = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
UpperCAmelCase__ , UpperCAmelCase__ = input_paths_and_base_extractors[compression_format]
if input_path is None:
UpperCAmelCase__ = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
assert base_extractor.is_extractable(_A )
UpperCAmelCase__ = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(_A , _A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ = file_path.read_text(encoding='utf-8' )
else:
UpperCAmelCase__ = output_path.read_text(encoding='utf-8' )
UpperCAmelCase__ = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
UpperCAmelCase__ = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
UpperCAmelCase__ = input_paths[compression_format]
if input_path is None:
UpperCAmelCase__ = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
UpperCAmelCase__ = Extractor.infer_extractor_format(_A )
assert extractor_format is not None
UpperCAmelCase__ = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(_A , _A , _A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ = file_path.read_text(encoding='utf-8' )
else:
UpperCAmelCase__ = output_path.read_text(encoding='utf-8' )
UpperCAmelCase__ = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a_ ( lowerCamelCase , lowerCamelCase ):
import tarfile
UpperCAmelCase__ = tmp_path / 'data_dot_dot'
directory.mkdir()
UpperCAmelCase__ = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(_A , 'w' ) as f:
f.add(_A , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def a_ ( lowerCamelCase ):
import tarfile
UpperCAmelCase__ = tmp_path / 'data_sym_link'
directory.mkdir()
UpperCAmelCase__ = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=_A )
with tarfile.TarFile(_A , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
UpperCAmelCase__ = insecure_tar_files[insecure_tar_file]
UpperCAmelCase__ = tmp_path / 'extracted'
TarExtractor.extract(_A , _A )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
UpperCAmelCase__ = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(_A )
assert zipfile.is_zipfile(str(_A ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(_A ) # but we're right
| 709
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[Any] = ['model.decoder.embed_positions.weights']
def a_ ( lowerCamelCase ):
if "emb" in name:
UpperCAmelCase__ = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
UpperCAmelCase__ = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
UpperCAmelCase__ = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
UpperCAmelCase__ = name.replace('linear1' , 'fc1' )
if "linear2" in name:
UpperCAmelCase__ = name.replace('linear2' , 'fc2' )
if "norm1" in name:
UpperCAmelCase__ = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
UpperCAmelCase__ = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
UpperCAmelCase__ = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
UpperCAmelCase__ = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
UpperCAmelCase__ = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase__ = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(state_dict.keys() )
UpperCAmelCase__ = {}
for key in keys:
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
UpperCAmelCase__ = rename_keys(lowerCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase__ = val[:hidden_size, :]
UpperCAmelCase__ = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase__ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase__ = val
else:
UpperCAmelCase__ = val
return state_dict, enc_dec_proj_state_dict
def a_ ( lowerCamelCase ):
if checkpoint == "small":
# default config values
UpperCAmelCase__ = 1_0_2_4
UpperCAmelCase__ = 2_4
UpperCAmelCase__ = 1_6
elif checkpoint == "medium":
UpperCAmelCase__ = 1_5_3_6
UpperCAmelCase__ = 4_8
UpperCAmelCase__ = 2_4
elif checkpoint == "large":
UpperCAmelCase__ = 2_0_4_8
UpperCAmelCase__ = 4_8
UpperCAmelCase__ = 3_2
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
UpperCAmelCase__ = MusicgenDecoderConfig(
hidden_size=lowerCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCamelCase , num_attention_heads=lowerCamelCase , )
return config
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="cpu" ):
UpperCAmelCase__ = MusicGen.get_pretrained(lowerCamelCase , device=lowerCamelCase )
UpperCAmelCase__ = decoder_config_from_checkpoint(lowerCamelCase )
UpperCAmelCase__ = fairseq_model.lm.state_dict()
UpperCAmelCase__ , UpperCAmelCase__ = rename_state_dict(
lowerCamelCase , hidden_size=decoder_config.hidden_size )
UpperCAmelCase__ = TaEncoderModel.from_pretrained('t5-base' )
UpperCAmelCase__ = EncodecModel.from_pretrained('facebook/encodec_32khz' )
UpperCAmelCase__ = MusicgenForCausalLM(lowerCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase__ , UpperCAmelCase__ = decoder.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowerCamelCase )
if len(lowerCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowerCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
UpperCAmelCase__ = MusicgenForConditionalGeneration(text_encoder=lowerCamelCase , audio_encoder=lowerCamelCase , decoder=lowerCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowerCamelCase )
# check we can do a forward pass
UpperCAmelCase__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase__ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase__ = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
UpperCAmelCase__ = AutoTokenizer.from_pretrained('t5-base' )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
UpperCAmelCase__ = MusicgenProcessor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
# set the appropriate bos/pad token ids
UpperCAmelCase__ = 2_0_4_8
UpperCAmelCase__ = 2_0_4_8
# set other default generation config params
UpperCAmelCase__ = int(3_0 * audio_encoder.config.frame_rate )
UpperCAmelCase__ = True
UpperCAmelCase__ = 3.0
if pytorch_dump_folder is not None:
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowerCamelCase )
processor.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
lowerCAmelCase__ : List[str] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 632
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = BarthezTokenizer
_UpperCamelCase : List[str] = BarthezTokenizerFast
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Tuple = True
def __a ( self ):
super().setUp()
_lowercase : int = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowerCAmelCase )
_lowercase : Union[str, Any] = tokenizer
def __a ( self ):
_lowercase : Dict = '<pad>'
_lowercase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_lowerCAmelCase ) , 1_0_1_1_2_2 )
def __a ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
def __a ( self ):
_lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : Optional[Any] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
_lowercase : Any = self.tokenizer(
_lowerCAmelCase , max_length=len(_lowerCAmelCase ) , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='pt' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_lowercase : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
if not self.test_rust_tokenizer:
return
_lowercase : int = self.get_tokenizer()
_lowercase : Tuple = self.get_rust_tokenizer()
_lowercase : List[str] = 'I was born in 92000, and this is falsé.'
_lowercase : str = tokenizer.tokenize(_lowerCAmelCase )
_lowercase : Optional[int] = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
_lowercase : Tuple = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = self.get_rust_tokenizer()
_lowercase : Optional[int] = tokenizer.encode(_lowerCAmelCase )
_lowercase : int = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __a ( self ):
# fmt: off
_lowercase : List[str] = {'input_ids': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_lowercase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_lowerCAmelCase , )
| 66
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] ="""Speech2TextFeatureExtractor"""
__UpperCAmelCase : Union[str, Any] ="""Speech2TextTokenizer"""
def __init__( self , __a , __a ):
super().__init__(__a , __a )
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
def __call__( self , *__a , **__a ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
__lowerCAmelCase = kwargs.pop("raw_speech" )
else:
__lowerCAmelCase = kwargs.pop("audio" , __a )
__lowerCAmelCase = kwargs.pop("sampling_rate" , __a )
__lowerCAmelCase = kwargs.pop("text" , __a )
if len(__a ) > 0:
__lowerCAmelCase = args[0]
__lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__lowerCAmelCase = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )
if text is not None:
__lowerCAmelCase = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCAmelCase = encodings["input_ids"]
return inputs
def snake_case ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def snake_case ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer
yield
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
| 636
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=14 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=0.02 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = rotary_dim
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = initializer_range
UpperCamelCase__ = None
UpperCamelCase__ = vocab_size - 1
UpperCamelCase__ = vocab_size - 1
UpperCamelCase__ = vocab_size - 1
def _lowerCamelCase ( self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_lowercase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = 20
UpperCamelCase__ = model_class_name(_lowercase )
UpperCamelCase__ = model.init_cache(input_ids.shape[0] , _lowercase )
UpperCamelCase__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
UpperCamelCase__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase__ = model(
input_ids[:, :-1] , attention_mask=_lowercase , past_key_values=_lowercase , position_ids=_lowercase , )
UpperCamelCase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
UpperCamelCase__ = model(
input_ids[:, -1:] , attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , position_ids=_lowercase , )
UpperCamelCase__ = model(_lowercase )
UpperCamelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = 20
UpperCamelCase__ = model_class_name(_lowercase )
UpperCamelCase__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
UpperCamelCase__ = model.init_cache(input_ids.shape[0] , _lowercase )
UpperCamelCase__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase__ = model(
input_ids[:, :-1] , attention_mask=_lowercase , past_key_values=_lowercase , position_ids=_lowercase , )
UpperCamelCase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
UpperCamelCase__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_lowercase , position_ids=_lowercase , )
UpperCamelCase__ = model(_lowercase , attention_mask=_lowercase )
UpperCamelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
snake_case : Optional[int] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
snake_case : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _lowerCamelCase ( self ):
UpperCamelCase__ = FlaxGPTJModelTester(self )
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase , _lowercase )
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_lowercase , _lowercase , _lowercase , _lowercase )
@tooslow
def _lowerCamelCase ( self ):
UpperCamelCase__ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
UpperCamelCase__ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=_lowercase , truncation=_lowercase )
UpperCamelCase__ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
UpperCamelCase__ = False
UpperCamelCase__ = model.config.eos_token_id
UpperCamelCase__ = jax.jit(model.generate )
UpperCamelCase__ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
UpperCamelCase__ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
UpperCamelCase__ = [
"""Hello this is a long string of text.\n\nI\'m trying to get the text of the""",
"""Hey, I\'m a little late to the party. I\'m going to""",
]
self.assertListEqual(_lowercase , _lowercase )
@is_pt_flax_cross_test
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase__ = self._prepare_for_class(_lowercase , _lowercase )
UpperCamelCase__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(_lowercase , _lowercase )
UpperCamelCase__ , UpperCamelCase__ = pt_inputs["""input_ids"""].shape
UpperCamelCase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowercase ):
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = pt_model_class(_lowercase ).eval()
UpperCamelCase__ = model_class(_lowercase , dtype=jnp.floataa )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowercase )
UpperCamelCase__ = fx_state
with torch.no_grad():
UpperCamelCase__ = pt_model(**_lowercase ).to_tuple()
UpperCamelCase__ = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowercase )
UpperCamelCase__ = model_class.from_pretrained(_lowercase , from_pt=_lowercase )
UpperCamelCase__ = fx_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(
len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase__ = self._prepare_for_class(_lowercase , _lowercase )
UpperCamelCase__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(_lowercase , _lowercase )
UpperCamelCase__ = pt_model_class(_lowercase ).eval()
UpperCamelCase__ = model_class(_lowercase , dtype=jnp.floataa )
UpperCamelCase__ = load_flax_weights_in_pytorch_model(_lowercase , fx_model.params )
UpperCamelCase__ , UpperCamelCase__ = pt_inputs["""input_ids"""].shape
UpperCamelCase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowercase ):
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = 0
UpperCamelCase__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCamelCase__ = pt_model(**_lowercase ).to_tuple()
UpperCamelCase__ = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowercase )
UpperCamelCase__ = pt_model_class.from_pretrained(_lowercase , from_flax=_lowercase )
with torch.no_grad():
UpperCamelCase__ = pt_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(
len(_lowercase ) , len(_lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
UpperCamelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
| 703
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[Any] = """data2vec-audio"""
def __init__( self , __lowerCAmelCase=32 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="gelu" , __lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=16 , __lowerCAmelCase=19 , __lowerCAmelCase=5 , __lowerCAmelCase=0.05 , __lowerCAmelCase=10 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=0 , __lowerCAmelCase="sum" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=256 , __lowerCAmelCase=(512, 512, 512, 512, 1500) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=512 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = feat_extract_activation
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = conv_bias
UpperCamelCase__ = num_conv_pos_embeddings
UpperCamelCase__ = num_conv_pos_embedding_groups
UpperCamelCase__ = conv_pos_kernel_size
UpperCamelCase__ = len(self.conv_dim )
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = feat_proj_dropout
UpperCamelCase__ = final_dropout
UpperCamelCase__ = layerdrop
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = initializer_range
UpperCamelCase__ = vocab_size
UpperCamelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
# ctc loss
UpperCamelCase__ = ctc_loss_reduction
UpperCamelCase__ = ctc_zero_infinity
# adapter
UpperCamelCase__ = add_adapter
UpperCamelCase__ = adapter_kernel_size
UpperCamelCase__ = adapter_stride
UpperCamelCase__ = num_adapter_layers
UpperCamelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = list(__lowerCAmelCase )
UpperCamelCase__ = xvector_output_dim
@property
def _lowerCamelCase ( self ):
return math.prod(self.conv_stride )
| 548
| 0
|
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = [[] for _ in range(__lowercase )]
a_ = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(__lowercase ) <= key:
return input_string
for position, character in enumerate(__lowercase ):
a_ = position % (lowest * 2) # puts it in bounds
a_ = min(__lowercase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__lowercase )
a_ = ["".join(__lowercase ) for row in temp_grid]
a_ = "".join(__lowercase )
return output_string
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
a_ = []
a_ = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
a_ = [[] for _ in range(__lowercase )] # generates template
for position in range(len(__lowercase ) ):
a_ = position % (lowest * 2) # puts it in bounds
a_ = min(__lowercase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
a_ = 0
for row in temp_grid: # fills in the characters
a_ = input_string[counter : counter + len(__lowercase )]
grid.append(list(__lowercase ) )
counter += len(__lowercase )
a_ = "" # reads as zigzag
for position in range(len(__lowercase ) ):
a_ = position % (lowest * 2) # puts it in bounds
a_ = min(__lowercase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def lowerCamelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
a_ = {}
for key_guess in range(1 , len(__lowercase ) ): # tries every key
a_ = decrypt(__lowercase , __lowercase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 483
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 670
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __lowercase ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Tuple = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE :List[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__SCREAMING_SNAKE_CASE :Dict = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__SCREAMING_SNAKE_CASE :Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__SCREAMING_SNAKE_CASE :List[Any] = False
@property
def snake_case__ ( self : Tuple ):
return 32
@property
def snake_case__ ( self : int ):
return 32
@property
def snake_case__ ( self : Optional[int] ):
return self.time_input_dim
@property
def snake_case__ ( self : Optional[int] ):
return self.time_input_dim * 4
@property
def snake_case__ ( self : Tuple ):
return 100
@property
def snake_case__ ( self : Dict ):
torch.manual_seed(0 )
__magic_name__ = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ = UNetaDConditionModel(**__a )
return model
@property
def snake_case__ ( self : Optional[int] ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case__ ( self : str ):
torch.manual_seed(0 )
__magic_name__ = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case__ ( self : List[str] ):
__magic_name__ = self.dummy_unet
__magic_name__ = self.dummy_movq
__magic_name__ = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__magic_name__ = DDIMScheduler(**__a )
__magic_name__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case__ ( self : Tuple , a__ : List[str] , a__ : Optional[Any]=0 ):
__magic_name__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
__magic_name__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__a )
# create init_image
__magic_name__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
__magic_name__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(__a )
else:
__magic_name__ = torch.Generator(device=__a ).manual_seed(__a )
__magic_name__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def snake_case__ ( self : Dict ):
__magic_name__ = """cpu"""
__magic_name__ = self.get_dummy_components()
__magic_name__ = self.pipeline_class(**__a )
__magic_name__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__magic_name__ = pipe(**self.get_dummy_inputs(__a ) )
__magic_name__ = output.images
__magic_name__ = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[int] ):
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__magic_name__ = init_image.resize((512, 512) )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__magic_name__ = torch.from_numpy(np.array(__a ) ).float() / 255.0
__magic_name__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__magic_name__ = """A robot, 4k photo"""
__magic_name__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__a )
__magic_name__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
__magic_name__ = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
__magic_name__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
__magic_name__ = pipe_prior(
__a , image=__a , strength=0.85 , generator=__a , negative_prompt='''''' , ).to_tuple()
__magic_name__ = pipeline(
image=__a , image_embeds=__a , negative_image_embeds=__a , hint=__a , generator=__a , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__a , __a )
| 720
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = """instructblip_vision_model"""
def __init__( self : List[Any] , a__ : Union[str, Any]=1408 , a__ : Tuple=6144 , a__ : Dict=39 , a__ : Union[str, Any]=16 , a__ : int=224 , a__ : str=14 , a__ : Any="gelu" , a__ : Dict=1E-6 , a__ : Tuple=0.0 , a__ : List[str]=1E-10 , a__ : List[Any]=True , **a__ : List[str] , ):
super().__init__(**a__ )
__magic_name__ = hidden_size
__magic_name__ = intermediate_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = patch_size
__magic_name__ = image_size
__magic_name__ = initializer_range
__magic_name__ = attention_dropout
__magic_name__ = layer_norm_eps
__magic_name__ = hidden_act
__magic_name__ = qkv_bias
@classmethod
def snake_case__ ( cls : int , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ):
cls._set_token_in_kwargs(a__ )
__magic_name__ , __magic_name__ = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
__magic_name__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a__ , **a__ )
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :int = """instructblip_qformer"""
def __init__( self : str , a__ : Union[str, Any]=3_0522 , a__ : Any=768 , a__ : Tuple=12 , a__ : List[Any]=12 , a__ : Dict=3072 , a__ : Dict="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : str=0.02 , a__ : Tuple=1E-12 , a__ : str=0 , a__ : List[Any]="absolute" , a__ : Tuple=2 , a__ : Optional[int]=1408 , **a__ : Optional[int] , ):
super().__init__(pad_token_id=a__ , **a__ )
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_act
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = position_embedding_type
__magic_name__ = cross_attention_frequency
__magic_name__ = encoder_hidden_size
@classmethod
def snake_case__ ( cls : Optional[int] , a__ : Union[str, os.PathLike] , **a__ : List[Any] ):
cls._set_token_in_kwargs(a__ )
__magic_name__ , __magic_name__ = cls.get_config_dict(a__ , **a__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
__magic_name__ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a__ , **a__ )
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[Any] = """instructblip"""
__SCREAMING_SNAKE_CASE :Dict = True
def __init__( self : List[Any] , a__ : Tuple=None , a__ : Union[str, Any]=None , a__ : Optional[int]=None , a__ : Tuple=32 , **a__ : Union[str, Any] ):
super().__init__(**a__ )
if vision_config is None:
__magic_name__ = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
__magic_name__ = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
__magic_name__ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__magic_name__ = InstructBlipVisionConfig(**a__ )
__magic_name__ = InstructBlipQFormerConfig(**a__ )
__magic_name__ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__magic_name__ = CONFIG_MAPPING[text_model_type](**a__ )
__magic_name__ = self.text_config.tie_word_embeddings
__magic_name__ = self.text_config.is_encoder_decoder
__magic_name__ = num_query_tokens
__magic_name__ = self.vision_config.hidden_size
__magic_name__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__magic_name__ = 1.0
__magic_name__ = 0.02
@classmethod
def snake_case__ ( cls : Optional[int] , a__ : InstructBlipVisionConfig , a__ : InstructBlipQFormerConfig , a__ : PretrainedConfig , **a__ : Dict , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **a__ , )
def snake_case__ ( self : Dict ):
__magic_name__ = copy.deepcopy(self.__dict__ )
__magic_name__ = self.vision_config.to_dict()
__magic_name__ = self.qformer_config.to_dict()
__magic_name__ = self.text_config.to_dict()
__magic_name__ = self.__class__.model_type
return output
| 245
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "data2vec-vision"
def __init__( self , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-1_2 , _a=2_2_4 , _a=1_6 , _a=3 , _a=False , _a=False , _a=False , _a=False , _a=0.1 , _a=0.1 , _a=True , _a=[3, 5, 7, 1_1] , _a=[1, 2, 3, 6] , _a=True , _a=0.4 , _a=2_5_6 , _a=1 , _a=False , _a=2_5_5 , **_a , ) -> List[str]:
super().__init__(**_a )
_a : Union[str, Any] = hidden_size
_a : Dict = num_hidden_layers
_a : Any = num_attention_heads
_a : Optional[Any] = intermediate_size
_a : Union[str, Any] = hidden_act
_a : str = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : List[str] = initializer_range
_a : str = layer_norm_eps
_a : List[str] = image_size
_a : str = patch_size
_a : Optional[int] = num_channels
_a : List[str] = use_mask_token
_a : Optional[Any] = use_absolute_position_embeddings
_a : Tuple = use_relative_position_bias
_a : Optional[int] = use_shared_relative_position_bias
_a : Any = layer_scale_init_value
_a : List[str] = drop_path_rate
_a : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
_a : str = out_indices
_a : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
_a : Optional[int] = use_auxiliary_head
_a : Optional[Any] = auxiliary_loss_weight
_a : List[Any] = auxiliary_channels
_a : str = auxiliary_num_convs
_a : List[str] = auxiliary_concat_input
_a : Optional[Any] = semantic_loss_ignore_index
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = version.parse("1.11" )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowercase ( self ) -> float:
return 1e-4
| 14
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase : List[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any]=None) -> List[str]:
'''simple docstring'''
if subparsers is not None:
__UpperCamelCase : int = subparsers.add_parser("tpu-config" , description=_description)
else:
__UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description)
# Core arguments
__UpperCamelCase : Optional[Any] = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`.")
config_args.add_argument(
"--config_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_lowerCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_lowerCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
__UpperCamelCase : Tuple = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_lowerCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it.")
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase)
return parser
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any]) -> Dict:
'''simple docstring'''
__UpperCamelCase : List[str] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_lowerCamelCase):
__UpperCamelCase : Dict = load_config_from_file(args.config_file)
if not args.command_file and defaults.command_file is not None and not args.command:
__UpperCamelCase : List[Any] = defaults.command_file
if not args.command and defaults.commands is not None:
__UpperCamelCase : int = defaults.commands
if not args.tpu_name:
__UpperCamelCase : int = defaults.tpu_name
if not args.tpu_zone:
__UpperCamelCase : Optional[Any] = defaults.tpu_zone
if args.accelerate_version == "dev":
__UpperCamelCase : List[str] = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
__UpperCamelCase : Optional[int] = "accelerate -U"
elif isinstance(parse(args.accelerate_version) , _lowerCamelCase):
__UpperCamelCase : Union[str, Any] = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod.")
if args.command_file:
with open(args.command_file , "r") as f:
__UpperCamelCase : List[Any] = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _lowerCamelCase):
__UpperCamelCase : Union[str, Any] = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__UpperCamelCase : str = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
__UpperCamelCase : str = "; ".join(_lowerCamelCase)
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__UpperCamelCase : List[Any] = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(_lowerCamelCase)}')
return
subprocess.run(_lowerCamelCase)
print("Successfully setup pod.")
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = tpu_command_parser()
__UpperCamelCase : Optional[Any] = parser.parse_args()
tpu_command_launcher(_lowerCamelCase)
| 557
| 0
|
'''simple docstring'''
__snake_case = {
'''km/h''': 1.0,
'''m/s''': 3.6,
'''mph''': 1.60_9344,
'''knot''': 1.852,
}
__snake_case = {
'''km/h''': 1.0,
'''m/s''': 0.2_7777_7778,
'''mph''': 0.6_2137_1192,
'''knot''': 0.5_3995_6803,
}
def a ( __a , __a , __a ) -> float:
'''simple docstring'''
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
UpperCamelCase__ :Optional[Any] = (
f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
f'''Valid values are: {", ".join(__a )}'''
)
raise ValueError(__a )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
'''simple docstring'''
import torch
from torch import nn
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Dict = n_token
UpperCamelCase__ :List[Any] = d_embed
UpperCamelCase__ :Dict = d_proj
UpperCamelCase__ :Dict = cutoffs + [n_token]
UpperCamelCase__ :Union[str, Any] = [0] + self.cutoffs
UpperCamelCase__ :Any = div_val
UpperCamelCase__ :int = self.cutoffs[0]
UpperCamelCase__ :List[Any] = len(self.cutoffs ) - 1
UpperCamelCase__ :List[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCamelCase__ :Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCamelCase__ :Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCamelCase__ :Union[str, Any] = nn.ModuleList()
UpperCamelCase__ :str = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase_ , UpperCamelCase_ ) ) )
else:
self.out_projs.append(UpperCamelCase_ )
self.out_layers.append(nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase_ , UpperCamelCase_ ) ) )
self.out_layers.append(nn.Linear(UpperCamelCase_ , r_idx - l_idx ) )
UpperCamelCase__ :Tuple = keep_order
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if proj is None:
UpperCamelCase__ :List[str] = nn.functional.linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCamelCase__ :Any = nn.functional.linear(UpperCamelCase_ , proj.t().contiguous() )
UpperCamelCase__ :Union[str, Any] = nn.functional.linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase__ :Optional[Any] = hidden[..., :-1, :].contiguous()
UpperCamelCase__ :Optional[Any] = labels[..., 1:].contiguous()
UpperCamelCase__ :Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
UpperCamelCase__ :str = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
UpperCamelCase__ :int = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase__ :Optional[int] = self._compute_logit(UpperCamelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCamelCase__ :int = labels != -100
UpperCamelCase__ :List[Any] = torch.zeros_like(UpperCamelCase_ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase__ :str = (
-nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase__ :Dict = nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ :Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :str = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ :Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ :Optional[Any] = self.out_layers[i].weight
UpperCamelCase__ :Optional[int] = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ :str = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase__ :List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase_ )
biases.append(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ :str = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
if labels is None:
UpperCamelCase__ :Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase__ :Any = torch.zeros_like(UpperCamelCase_ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase__ :Any = 0
UpperCamelCase__ :str = [0] + self.cutoffs
for i in range(len(UpperCamelCase_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase__ :Any = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase__ :int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase__ :Tuple = labels.index_select(0 , UpperCamelCase_ ) - l_idx
UpperCamelCase__ :str = head_logprob.index_select(0 , UpperCamelCase_ )
UpperCamelCase__ :int = hidden.index_select(0 , UpperCamelCase_ )
else:
UpperCamelCase__ :Dict = hidden
if i == 0:
if labels is not None:
UpperCamelCase__ :Any = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ :List[str] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ :str = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :Any = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase__ :List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ :List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase__ :Dict = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.n_clusters == 0:
UpperCamelCase__ :Optional[Any] = self._compute_logit(UpperCamelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ :List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ :str = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ :List[Any] = self.out_layers[i].weight
UpperCamelCase__ :List[Any] = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ :Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase__ :Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase_ )
biases.append(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ :Optional[int] = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase__ :Union[str, Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :int = [0] + self.cutoffs
for i in range(len(UpperCamelCase_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase__ :str = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ :List[str] = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Tuple = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :Optional[Any] = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase__ :Union[str, Any] = logprob_i
return out
| 280
| 0
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""torch""", """scipy"""]
def __init__( self , *__magic_name__ , **__magic_name__ ):
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def UpperCamelCase__ ( cls , *__magic_name__ , **__magic_name__ ):
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def UpperCamelCase__ ( cls , *__magic_name__ , **__magic_name__ ):
requires_backends(cls , ["""torch""", """scipy"""] )
| 681
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681
| 1
|
'''simple docstring'''
from torch import nn
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 10
|
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Dict = (boundary[1] - boundary[0]) / steps
__UpperCAmelCase : Tuple = boundary[0]
__UpperCAmelCase : List[str] = boundary[1]
__UpperCAmelCase : List[Any] = make_points(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : int = 0.0
y += (h / 2.0) * f(lowerCamelCase__ )
for i in x_i:
# print(i)
y += h * f(lowerCamelCase__ )
y += (h / 2.0) * f(lowerCamelCase__ )
return y
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = a + h
while x < (b - h):
yield x
__UpperCAmelCase : List[str] = x + h
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: # enter your function here
"""simple docstring"""
__UpperCAmelCase : str = (x - 0) * (x - 0)
return y
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Tuple = 0.0 # Lower bound of integration
__UpperCAmelCase : Union[str, Any] = 1.0 # Upper bound of integration
__UpperCAmelCase : Union[str, Any] = 10.0 # define number of steps or resolution
__UpperCAmelCase : Dict = [a, b] # define boundary of integration
__UpperCAmelCase : Optional[int] = method_a(lowerCamelCase__ , lowerCamelCase__ )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 10
| 1
|
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a : int = numpy.array([0, 0])
a : Optional[Any] = numpy.array([0.5, 0.866_0254])
a : Tuple = numpy.array([1, 0])
a : List[str] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _SCREAMING_SNAKE_CASE ( _lowercase : list[numpy.ndarray] , _lowercase : int ) ->list[numpy.ndarray]:
'''simple docstring'''
a : List[str] = initial_vectors
for _ in range(_lowercase ):
a : Optional[Any] = iteration_step(_lowercase )
return vectors
def _SCREAMING_SNAKE_CASE ( _lowercase : list[numpy.ndarray] ) ->list[numpy.ndarray]:
'''simple docstring'''
a : Union[str, Any] = []
for i, start_vector in enumerate(vectors[:-1] ):
a : str = vectors[i + 1]
new_vectors.append(_lowercase )
a : Optional[int] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _SCREAMING_SNAKE_CASE ( _lowercase : numpy.ndarray , _lowercase : float ) ->numpy.ndarray:
'''simple docstring'''
a : int = numpy.radians(_lowercase )
a, a : Optional[int] = numpy.cos(_lowercase ), numpy.sin(_lowercase )
a : Dict = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : list[numpy.ndarray] ) ->None:
'''simple docstring'''
a : Dict = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a, a : Any = zip(*_lowercase )
plt.plot(_lowercase , _lowercase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[int] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 633
| 0
|
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _UpperCAmelCase ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
@property
def A ( self : Optional[Any] ) -> Union[str, Any]:
return self.feat_extract_tester.prepare_feat_extract_dict()
def A ( self : List[Any] ) -> Optional[Any]:
lowercase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , '''feature_size''' ) )
self.assertTrue(hasattr(A_ , '''sampling_rate''' ) )
self.assertTrue(hasattr(A_ , '''padding_value''' ) )
def A ( self : str ) -> List[str]:
lowercase_ : Any = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Dict = feat_extract.model_input_names[0]
lowercase_ : int = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
lowercase_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowercase_ : int = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowercase_ : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ : List[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def A ( self : List[str] ) -> Optional[int]:
lowercase_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowercase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : List[Any] = feat_extract.model_input_names[0]
lowercase_ : str = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowercase_ : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def A ( self : Union[str, Any] ) -> List[Any]:
lowercase_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : List[str] = feat_extract.model_input_names[0]
lowercase_ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
lowercase_ : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def A ( self : int , A : Union[str, Any]=False ) -> Optional[Any]:
def _inputs_have_equal_length(A : int ):
lowercase_ : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(A : int , A : Optional[int] ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
return False
return True
lowercase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowercase_ : Tuple = feat_extract.model_input_names[0]
lowercase_ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowercase_ : Dict = self.feat_extract_tester.seq_length_diff
lowercase_ : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff
lowercase_ : Optional[int] = self.feat_extract_tester.min_seq_length
lowercase_ : List[str] = self.feat_extract_tester.batch_size
lowercase_ : str = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase_ : Optional[Any] = feat_extract.pad(A_ , padding=A_ )
lowercase_ : List[str] = input_a[input_name]
lowercase_ : Optional[Any] = feat_extract.pad(A_ , padding='''longest''' )
lowercase_ : List[Any] = input_a[input_name]
lowercase_ : List[str] = feat_extract.pad(A_ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
lowercase_ : Optional[int] = input_a[input_name]
lowercase_ : Any = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )
lowercase_ : int = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''max_length''' )[input_name]
lowercase_ : List[Any] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=A_ , return_tensors='''np''' )
lowercase_ : List[str] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase_ : List[str] = feat_extract.pad(A_ , pad_to_multiple_of=10 )
lowercase_ : Union[str, Any] = input_a[input_name]
lowercase_ : int = feat_extract.pad(A_ , padding='''longest''' , pad_to_multiple_of=10 )
lowercase_ : List[Any] = input_a[input_name]
lowercase_ : Union[str, Any] = feat_extract.pad(
A_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=A_ )
lowercase_ : str = input_a[input_name]
lowercase_ : str = feat_extract.pad(
A_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=A_ , return_tensors='''np''' , )
lowercase_ : Any = input_a[input_name]
self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
lowercase_ : int = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowercase_ : List[str] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def A ( self : List[Any] , A : Optional[int]=False ) -> Dict:
def _inputs_have_equal_length(A : Any ):
lowercase_ : List[Any] = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(A : str , A : str ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
return False
return True
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowercase_ : List[str] = feat_extract.model_input_names[0]
lowercase_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowercase_ : Optional[int] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=A_ )
lowercase_ : Union[str, Any] = input_a[input_name]
lowercase_ : str = feat_extract.pad(A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
lowercase_ : Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to smallest with np
lowercase_ : str = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=A_ , )
lowercase_ : List[Any] = input_a[input_name]
lowercase_ : Union[str, Any] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
lowercase_ : Dict = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to middle
lowercase_ : Optional[int] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors='''np''' , )
lowercase_ : Any = input_a[input_name]
lowercase_ : Optional[int] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=A_ )
lowercase_ : Tuple = input_a[input_name]
lowercase_ : Optional[Any] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
lowercase_ : List[Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''longest''' , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''longest''' , truncation=A_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='''max_length''' , truncation=A_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase_ : Any = 12
lowercase_ : Optional[int] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
lowercase_ : str = input_a[input_name]
lowercase_ : Any = feat_extract.pad(
A_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
lowercase_ : Optional[int] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase_ : List[str] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowercase_ : Any = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
def A ( self : int ) -> str:
self._check_padding(numpify=A_ )
def A ( self : Optional[int] ) -> Optional[Any]:
self._check_padding(numpify=A_ )
def A ( self : List[Any] ) -> Dict:
self._check_truncation(numpify=A_ )
def A ( self : int ) -> str:
self._check_truncation(numpify=A_ )
@require_torch
def A ( self : Optional[Any] ) -> List[str]:
lowercase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : str = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ : Union[str, Any] = feat_extract.model_input_names[0]
lowercase_ : Tuple = BatchFeature({input_name: speech_inputs} )
lowercase_ : Optional[Any] = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase_ : Union[str, Any] = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def A ( self : int ) -> Tuple:
lowercase_ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ : Tuple = feat_extract.model_input_names[0]
lowercase_ : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowercase_ : Optional[Any] = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase_ : Optional[int] = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def A ( self : Dict ) -> List[Any]:
lowercase_ : List[Any] = self.feat_extract_dict
lowercase_ : Union[str, Any] = True
lowercase_ : int = self.feature_extraction_class(**A_ )
lowercase_ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ : List[str] = [len(A_ ) for x in speech_inputs]
lowercase_ : Optional[Any] = feat_extract.model_input_names[0]
lowercase_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
lowercase_ : Optional[int] = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ : int = self.feat_extract_dict
lowercase_ : int = True
lowercase_ : int = self.feature_extraction_class(**A_ )
lowercase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ : int = [len(A_ ) for x in speech_inputs]
lowercase_ : List[str] = feat_extract.model_input_names[0]
lowercase_ : List[str] = BatchFeature({input_name: speech_inputs} )
lowercase_ : Any = min(A_ )
lowercase_ : List[str] = feat_extract.pad(
A_ , padding='''max_length''' , max_length=A_ , truncation=A_ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 705
|
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowercase ( ):
lowercase_ : int = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=__snake_case , default=__snake_case , required=__snake_case , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=__snake_case , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=__snake_case , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=__snake_case , default=4_2 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=__snake_case , default=0 , help='''cuda_id.''' , )
lowercase_ : Optional[Any] = parser.parse_args()
return args
def lowercase ( __snake_case : Dict , __snake_case : List[Any] , __snake_case : str ):
if not len(__snake_case ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
lowercase_ , lowercase_ : Union[str, Any] = imgs[0].size
lowercase_ : Dict = Image.new('''RGB''' , size=(cols * w, rows * h) )
lowercase_ , lowercase_ : str = grid.size
for i, img in enumerate(__snake_case ):
grid.paste(__snake_case , box=(i % cols * w, i // cols * h) )
return grid
def lowercase ( __snake_case : Dict , __snake_case : List[str]="robotic cat with wings" , __snake_case : Optional[int]=7.5 , __snake_case : Tuple=5_0 , __snake_case : Dict=1 , __snake_case : List[Any]=4_2 , ):
lowercase_ : Optional[Any] = torch.Generator(pipeline.device ).manual_seed(__snake_case )
lowercase_ : str = pipeline(
__snake_case , guidance_scale=__snake_case , num_inference_steps=__snake_case , generator=__snake_case , num_images_per_prompt=__snake_case , ).images
lowercase_ : Dict = int(math.sqrt(__snake_case ) )
lowercase_ : Union[str, Any] = image_grid(__snake_case , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__A : Union[str, Any] = parse_args()
# Load models and create wrapper for stable diffusion
__A : Any = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__A : int = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__A : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__A : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__A : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__A : int = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__A : List[Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__A : Tuple = unet.to(torch.device('''cuda''', args.cuda_id))
__A : str = pipeline.to(unet.device)
__A , __A : Optional[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__A : Tuple = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 141
| 0
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
UpperCAmelCase = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
UpperCAmelCase = {
'''jukebox''': 512,
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[str] = PRETRAINED_LYRIC_TOKENS_SIZES
_UpperCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case , snake_case , snake_case , snake_case=["v3", "v2", "v2"] , snake_case=512 , snake_case=5 , snake_case="<|endoftext|>" , **snake_case , ):
lowercase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
super().__init__(
unk_token=snake_case , n_genres=snake_case , version=snake_case , max_n_lyric_tokens=snake_case , **snake_case , )
lowercase = version
lowercase = max_n_lyric_tokens
lowercase = n_genres
with open(snake_case , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(snake_case )
with open(snake_case , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(snake_case )
with open(snake_case , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(snake_case )
lowercase = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
lowercase = oov.replace(r'\-\'' , r'\-+\'' )
lowercase = regex.compile(snake_case )
lowercase = {v: k for k, v in self.artists_encoder.items()}
lowercase = {v: k for k, v in self.genres_encoder.items()}
lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = [self.artists_encoder.get(snake_case , 0 ) for artist in list_artists]
for genres in range(len(snake_case ) ):
lowercase = [self.genres_encoder.get(snake_case , 0 ) for genre in list_genres[genres]]
lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
lowercase = [[self.lyrics_encoder.get(snake_case , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return list(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ):
lowercase , lowercase , lowercase = self.prepare_for_tokenization(snake_case , snake_case , snake_case )
lowercase = self._tokenize(snake_case )
return artist, genre, lyrics
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
lowercase = artists[idx].lower()
lowercase = [genres[idx].lower()]
else:
lowercase = self._normalize(artists[idx] ) + '.v2'
lowercase = [
self._normalize(snake_case ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
lowercase = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
lowercase = {vocab[index]: index + 1 for index in range(len(snake_case ) )}
lowercase = 0
lowercase = len(snake_case ) + 1
lowercase = self.vocab
lowercase = {v: k for k, v in self.vocab.items()}
lowercase = ''
else:
lowercase = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
lowercase = self._run_strip_accents(snake_case )
lowercase = lyrics.replace('\\' , '\n' )
lowercase = self.out_of_vocab.sub('' , snake_case ), [], []
return artists, genres, lyrics
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = unicodedata.normalize('NFD' , snake_case )
lowercase = []
for char in text:
lowercase = unicodedata.category(snake_case )
if cat == "Mn":
continue
output.append(snake_case )
return "".join(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = (
[chr(snake_case ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(snake_case ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(snake_case ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
lowercase = frozenset(snake_case )
lowercase = re.compile(r'_+' )
lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
lowercase = pattern.sub('_' , snake_case ).strip('_' )
return text
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return " ".join(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ):
# Convert to TensorType
if not isinstance(snake_case , snake_case ):
lowercase = TensorType(snake_case )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
lowercase = tf.constant
lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
lowercase = torch.tensor
lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
lowercase = jnp.array
lowercase = _is_jax
else:
lowercase = np.asarray
lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowercase = [inputs]
if not is_tensor(snake_case ):
lowercase = as_tensor(snake_case )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , snake_case , snake_case , snake_case="" , snake_case="pt" ):
lowercase = [0, 0, 0]
lowercase = [artist] * len(self.version )
lowercase = [genres] * len(self.version )
lowercase , lowercase , lowercase = self.tokenize(snake_case , snake_case , snake_case )
lowercase , lowercase , lowercase = self._convert_token_to_id(snake_case , snake_case , snake_case )
lowercase = [-INFINITY] * len(full_tokens[-1] )
lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case ) )
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case ) )
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case ) )
return (artists_file, genres_file, lyrics_file)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = self.artists_decoder.get(snake_case )
lowercase = [self.genres_decoder.get(snake_case ) for genre in genres_index]
lowercase = [self.lyrics_decoder.get(snake_case ) for character in lyric_index]
return artist, genres, lyrics
| 84
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = "pytorch_model.bin"
@dataclasses.dataclass
class A :
_SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} ,)
@dataclasses.dataclass
class A :
_SCREAMING_SNAKE_CASE = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """A csv or a json file containing the validation data."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """The name of the task to train on."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class A :
_SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default="""accuracy""" ,metadata={"""help""": """The evaluation metric used for the task."""} )
_SCREAMING_SNAKE_CASE = dataclasses.field(
default="""no""" ,metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=10 ,metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 ,metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 ,metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=100 ,metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} ,)
_SCREAMING_SNAKE_CASE = dataclasses.field(
default=__UpperCAmelCase ,metadata={"""help""": """Random seed for initialization."""} ,)
def A_ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
_snake_case : Optional[Any] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_snake_case : Any = dataset.filter(lambda lowercase_ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_snake_case : Any = int(eval_result * len(lowercase_ ) )
print(lowercase_ )
_snake_case : Optional[int] = dataset.sort('''probability''' , reverse=lowercase_ )
_snake_case : int = dataset.select(range(lowercase_ ) )
_snake_case : Union[str, Any] = dataset.remove_columns(['''label''', '''probability'''] )
_snake_case : int = dataset.rename_column('''prediction''' , '''label''' )
_snake_case : Optional[Any] = dataset.map(lambda lowercase_ : {"label": idalabel[example["label"]]} )
_snake_case : Tuple = dataset.shuffle(seed=args.seed )
_snake_case : Dict = os.path.join(lowercase_ , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowercase_ , index=lowercase_ )
else:
dataset.to_json(lowercase_ )
def A_ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) -> Union[str, Any]:
_snake_case : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_snake_case : Optional[int] = STModelArguments(model_name_or_path=lowercase_ )
_snake_case : Optional[int] = STDataArguments(train_file=lowercase_ , infer_file=lowercase_ )
_snake_case : Union[str, Any] = STTrainingArguments(output_dir=lowercase_ )
_snake_case : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowercase_ ).items():
setattr(lowercase_ , lowercase_ , lowercase_ )
for key, value in kwargs.items():
if hasattr(lowercase_ , lowercase_ ):
setattr(lowercase_ , lowercase_ , lowercase_ )
# Sanity checks
_snake_case : Optional[Any] = {}
_snake_case : int = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_snake_case : int = args.train_file
_snake_case : str = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_snake_case : Optional[Any] = args.eval_file
for key in data_files:
_snake_case : Optional[Any] = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
_snake_case : int = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
_snake_case : Dict = f'''{args.output_dir}/self-train_iter-{{}}'''.format
_snake_case : Any = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase_ )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
accelerator.wait_for_everyone()
_snake_case : Dict = None
_snake_case : str = None
_snake_case : int = 0
_snake_case : Dict = False
# Show the progress bar
_snake_case : Any = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_snake_case : Union[str, Any] = data_dir_format(lowercase_ )
assert os.path.exists(lowercase_ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_snake_case : List[Any] = os.path.join(lowercase_ , '''stage-1''' )
_snake_case : str = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase_ , lowercase_ ):
arguments_dict.update({key: value} )
_snake_case : List[Any] = os.path.join(lowercase_ , '''best-checkpoint''' , lowercase_ )
if os.path.exists(lowercase_ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , lowercase_ , lowercase_ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , lowercase_ )
finetune(**lowercase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase_ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , lowercase_ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_snake_case : int = os.path.join(lowercase_ , '''best-checkpoint''' )
_snake_case : Any = os.path.join(lowercase_ , '''stage-2''' )
# Update arguments_dict
_snake_case : Dict = model_path
_snake_case : Union[str, Any] = data_files['''train''']
_snake_case : Optional[int] = current_output_dir
_snake_case : Dict = os.path.join(lowercase_ , '''best-checkpoint''' , lowercase_ )
if os.path.exists(lowercase_ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , lowercase_ , lowercase_ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , lowercase_ )
finetune(**lowercase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase_ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , lowercase_ )
_snake_case : List[Any] = iteration
_snake_case : Any = data_dir_format(iteration + 1 )
_snake_case : Optional[int] = AutoConfig.from_pretrained(os.path.join(lowercase_ , '''best-checkpoint''' ) )
_snake_case : Union[str, Any] = config.idalabel
_snake_case : Tuple = os.path.join(lowercase_ , '''eval_results_best-checkpoint.json''' )
_snake_case : Any = os.path.join(lowercase_ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(lowercase_ )
with open(lowercase_ , '''r''' ) as f:
_snake_case : Tuple = float(json.load(lowercase_ )[args.eval_metric] )
_snake_case : List[str] = os.path.join(lowercase_ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(lowercase_ )
# Loading the dataset from local csv or json files.
_snake_case : str = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
_snake_case : Dict = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
shutil.copy(lowercase_ , os.path.join(lowercase_ , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowercase_ ):
shutil.copy(lowercase_ , os.path.join(lowercase_ , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.wait_for_everyone()
_snake_case : Tuple = os.path.join(lowercase_ , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_snake_case : Tuple = eval_result
if best_iteration is None:
_snake_case : Union[str, Any] = new_iteration
_snake_case : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_snake_case : Optional[int] = new_iteration
_snake_case : Optional[int] = new_eval_result
_snake_case : Optional[int] = 0
else:
if new_eval_result == best_eval_result:
_snake_case : int = new_iteration
_snake_case : str = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_snake_case : Dict = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , lowercase_ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase_ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowercase_ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase_ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowercase_ , '''eval_results_best-iteration.json''' ) , )
| 326
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
_lowerCAmelCase : int = 5
_lowerCAmelCase : str = 10
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : str = SpeechaTextTokenizer
_lowerCamelCase : Tuple = False
_lowerCamelCase : Dict = True
def lowercase ( self ):
super().setUp()
lowerCAmelCase : Dict = sp.SentencePieceProcessor()
spm_model.Load(snake_case__ )
lowerCAmelCase : int = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )]
lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : int = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
lowerCAmelCase : Tuple = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self ):
lowerCAmelCase : str = '<pad>'
lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 1001 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def lowercase ( self ):
lowerCAmelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def lowercase ( self ):
# fmt: off
lowerCAmelCase : int = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class lowerCAmelCase ( unittest.TestCase ):
_lowerCamelCase : Optional[Any] = """valhalla/s2t_mustc_multilinguial_medium"""
_lowerCamelCase : int = """C'est trop cool"""
_lowerCamelCase : List[str] = """Esto es genial"""
@classmethod
def lowercase ( cls ):
lowerCAmelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowercase ( self ):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
def lowercase ( self ):
self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
def lowercase ( self ):
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
lowerCAmelCase : List[str] = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase : Union[str, Any] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCAmelCase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = 'fr'
lowerCAmelCase : str = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , snake_case__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowercase ( self ):
lowerCAmelCase : List[Any] = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase : Dict = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 646
|
'''simple docstring'''
def __UpperCamelCase ( _A : int ) -> bool:
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 646
| 1
|
'''simple docstring'''
def snake_case_ (UpperCamelCase : list[int] , UpperCamelCase : list[int] ):
'''simple docstring'''
_a = len(UpperCamelCase )
print('''The following activities are selected:''' )
# The first activity is always selected
_a = 0
print(UpperCamelCase , end=''',''' )
# Consider rest of the activities
for j in range(UpperCamelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(UpperCamelCase , end=''',''' )
_a = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : Optional[Any] = [1, 3, 0, 5, 8, 5]
_snake_case : Tuple = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 22
|
"""simple docstring"""
import functools
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :Optional[Any] = len(_A )
snake_case_ :Optional[int] = len(_A )
@functools.cache
def min_distance(_A, _A ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
snake_case_ :str = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1, _A ), 1 + min_distance(_A, indexa + 1 ), diff + min_distance(indexa + 1, indexa + 1 ), )
return min_distance(0, 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 584
| 0
|
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Tuple=False ):
if isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
snake_case__ : int = len(set_a.intersection(snake_case_ ) )
if alternative_union:
snake_case__ : List[Any] = len(snake_case_ ) + len(snake_case_ )
else:
snake_case__ : List[str] = len(set_a.union(snake_case_ ) )
return intersection / union
if isinstance(snake_case_ , (list, tuple) ) and isinstance(snake_case_ , (list, tuple) ):
snake_case__ : Dict = [element for element in set_a if element in set_b]
if alternative_union:
snake_case__ : int = len(snake_case_ ) + len(snake_case_ )
return len(snake_case_ ) / union
else:
snake_case__ : List[str] = set_a + [element for element in set_b if element not in set_a]
return len(snake_case_ ) / len(snake_case_ )
return len(snake_case_ ) / len(snake_case_ )
return None
if __name__ == "__main__":
__lowerCamelCase : str = {"""a""", """b""", """c""", """d""", """e"""}
__lowerCamelCase : int = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
| 719
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25
| 0
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
snake_case_ : List[Any] = logging.get_logger(__name__)
def lowercase_ ( _lowercase : Tuple , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Tuple=None , _lowercase : List[Any]=None ):
'''simple docstring'''
if "." in tensor_name:
UpperCAmelCase : Optional[Any] = tensor_name.split("." )
for split in splits[:-1]:
UpperCAmelCase : int = getattr(_lowercase , _lowercase )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCAmelCase : Optional[int] = new_module
UpperCAmelCase : Union[str, Any] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
UpperCAmelCase : str = tensor_name in module._buffers
UpperCAmelCase : List[Any] = getattr(_lowercase , _lowercase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
UpperCAmelCase : Any = False
UpperCAmelCase : List[str] = False
if is_buffer or not is_bitsandbytes_available():
UpperCAmelCase : List[Any] = False
UpperCAmelCase : List[Any] = False
else:
UpperCAmelCase : Union[str, Any] = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCAmelCase : List[Any] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCAmelCase : int = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCAmelCase : List[str] = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
UpperCAmelCase : str = value.to("cpu" )
if value.dtype == torch.inta:
UpperCAmelCase : Any = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
UpperCAmelCase : str = torch.tensor(_lowercase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowercase ) and fpaa_statistics is None:
UpperCAmelCase : Union[str, Any] = new_value.T
UpperCAmelCase : Tuple = old_value.__dict__
if is_abit:
UpperCAmelCase : Optional[int] = bnb.nn.IntaParams(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
elif is_abit:
UpperCAmelCase : Tuple = bnb.nn.Paramsabit(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
UpperCAmelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCAmelCase : Any = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
UpperCAmelCase : Any = value.to(_lowercase )
else:
UpperCAmelCase : List[Any] = torch.tensor(_lowercase , device=_lowercase )
if is_buffer:
UpperCAmelCase : List[Any] = new_value
else:
UpperCAmelCase : List[Any] = nn.Parameter(_lowercase , requires_grad=old_value.requires_grad )
UpperCAmelCase : List[Any] = new_value
def lowercase_ ( _lowercase : str , _lowercase : Optional[int]=None , _lowercase : List[str]=None , _lowercase : Tuple=None , _lowercase : Union[str, Any]=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase : Any = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase , nn.Linear ) or isinstance(_lowercase , _lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase , UpperCAmelCase : List[str] = module.weight.shape
else:
UpperCAmelCase : Dict = module.in_features
UpperCAmelCase : Optional[Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCAmelCase : List[Any] = bnb.nn.LinearabitLt(
_lowercase , _lowercase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCAmelCase : Optional[Any] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCAmelCase : List[str] = bnb.nn.Linearabit(
_lowercase , _lowercase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCAmelCase : int = True
# Store the module class in case we need to transpose the weight later
UpperCAmelCase : Union[str, Any] = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCAmelCase , UpperCAmelCase : str = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase , has_been_replaced=_lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _lowercase : List[str] , _lowercase : Union[str, Any]=None , _lowercase : Dict=None , _lowercase : Any=None ):
'''simple docstring'''
UpperCAmelCase : Tuple = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
UpperCAmelCase , UpperCAmelCase : List[Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def lowercase_ ( *_lowercase : List[str] , **_lowercase : List[str] ):
'''simple docstring'''
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowercase , )
return replace_with_bnb_linear(*_lowercase , **_lowercase )
def lowercase_ ( *_lowercase : Tuple , **_lowercase : Optional[Any] ):
'''simple docstring'''
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowercase , )
return set_module_quantized_tensor_to_device(*_lowercase , **_lowercase )
def lowercase_ ( _lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCAmelCase : Optional[Any] = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase : Optional[Any] = sum(_lowercase , [] )
UpperCAmelCase : List[str] = len(_lowercase ) > 0
# Check if it is a base model
UpperCAmelCase : Dict = not hasattr(_lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase : str = list(model.named_children() )
UpperCAmelCase : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase : Tuple = set(_lowercase ) - set(_lowercase )
UpperCAmelCase : Optional[int] = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCAmelCase : List[Any] = [".weight", ".bias"]
UpperCAmelCase : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase : List[Any] = name.replace(_lowercase , "" )
filtered_module_names.append(_lowercase )
return filtered_module_names
| 595
|
"""simple docstring"""
from math import ceil
def lowercase_ ( _lowercase : Any , _lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase : List[str] = list(range(0 , _lowercase ) )
UpperCAmelCase : List[Any] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
UpperCAmelCase : Optional[int] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowercase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowercase )
# Missing blocks
UpperCAmelCase : Union[str, Any] = [i for i in blocks if i not in device_map_blocks]
UpperCAmelCase : Optional[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowercase ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(_lowercase ) )
if len(_lowercase ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(_lowercase ) )
if len(_lowercase ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(_lowercase ) )
def lowercase_ ( _lowercase : Union[str, Any] , _lowercase : int ):
'''simple docstring'''
UpperCAmelCase : str = list(range(_lowercase ) )
UpperCAmelCase : Tuple = int(ceil(n_layers / len(_lowercase ) ) )
UpperCAmelCase : Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , _lowercase , _lowercase )]
return dict(zip(_lowercase , _lowercase ) )
| 595
| 1
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class __magic_name__ ( A__ ):
def __init__( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : int = 50 , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : Optional[int] , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
UpperCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCamelCase__ , )
UpperCAmelCase = image.to(self.device )
# set step values
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=UpperCamelCase__ ), "This is a local test"
| 717
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __magic_name__ ( A__ ):
lowercase : "DiagonalGaussianDistribution"
class __magic_name__ ( A__, A__ ):
lowercase : Union[str, Any] =True
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 3 , UpperCamelCase__ : Tuple[str] = ("DownEncoderBlock2D",) , UpperCamelCase__ : Tuple[str] = ("UpDecoderBlock2D",) , UpperCamelCase__ : Tuple[int] = (64,) , UpperCamelCase__ : int = 1 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : float = 0.1_82_15 , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCAmelCase = Encoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , down_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , double_z=UpperCamelCase__ , )
# pass init params to Decoder
UpperCAmelCase = Decoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , up_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , act_fn=UpperCamelCase__ , )
UpperCAmelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
UpperCAmelCase = False
UpperCAmelCase = False
# only relevant if vae tiling is enabled
UpperCAmelCase = self.config.sample_size
UpperCAmelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase = 0.25
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=False ) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , (Encoder, Decoder) ):
UpperCAmelCase = value
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : bool = True ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = use_tiling
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
self.enable_tiling(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = True
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCAmelCase = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase__ , "set_processor" ):
UpperCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , UpperCamelCase__ , UpperCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return processors
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(UpperCamelCase__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : List[Any] ):
if hasattr(UpperCamelCase__ , "set_processor" ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
module.set_processor(UpperCamelCase__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , UpperCamelCase__ , UpperCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Dict:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase__ , return_dict=UpperCamelCase__ )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase = [self.encoder(UpperCamelCase__ ) for x_slice in x.split(1 )]
UpperCAmelCase = torch.cat(UpperCamelCase__ )
else:
UpperCAmelCase = self.encoder(UpperCamelCase__ )
UpperCAmelCase = self.quant_conv(UpperCamelCase__ )
UpperCAmelCase = DiagonalGaussianDistribution(UpperCamelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase__ , return_dict=UpperCamelCase__ )
UpperCAmelCase = self.post_quant_conv(UpperCamelCase__ )
UpperCAmelCase = self.decoder(UpperCamelCase__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
@apply_forward_hook
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase = [self._decode(UpperCamelCase__ ).sample for z_slice in z.split(1 )]
UpperCAmelCase = torch.cat(UpperCamelCase__ )
else:
UpperCAmelCase = self._decode(UpperCamelCase__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = min(a.shape[2] , b.shape[2] , UpperCamelCase__ )
for y in range(UpperCamelCase__ ):
UpperCAmelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = min(a.shape[3] , b.shape[3] , UpperCamelCase__ )
for x in range(UpperCamelCase__ ):
UpperCAmelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> AutoencoderKLOutput:
'''simple docstring'''
UpperCAmelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase = []
for i in range(0 , x.shape[2] , UpperCamelCase__ ):
UpperCAmelCase = []
for j in range(0 , x.shape[3] , UpperCamelCase__ ):
UpperCAmelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase = self.encoder(UpperCamelCase__ )
UpperCAmelCase = self.quant_conv(UpperCamelCase__ )
row.append(UpperCamelCase__ )
rows.append(UpperCamelCase__ )
UpperCAmelCase = []
for i, row in enumerate(UpperCamelCase__ ):
UpperCAmelCase = []
for j, tile in enumerate(UpperCamelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__ )
if j > 0:
UpperCAmelCase = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase__ , dim=3 ) )
UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=2 )
UpperCAmelCase = DiagonalGaussianDistribution(UpperCamelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase = []
for i in range(0 , z.shape[2] , UpperCamelCase__ ):
UpperCAmelCase = []
for j in range(0 , z.shape[3] , UpperCamelCase__ ):
UpperCAmelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase = self.post_quant_conv(UpperCamelCase__ )
UpperCAmelCase = self.decoder(UpperCamelCase__ )
row.append(UpperCamelCase__ )
rows.append(UpperCamelCase__ )
UpperCAmelCase = []
for i, row in enumerate(UpperCamelCase__ ):
UpperCAmelCase = []
for j, tile in enumerate(UpperCamelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__ )
if j > 0:
UpperCAmelCase = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase__ , dim=3 ) )
UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
UpperCAmelCase = sample
UpperCAmelCase = self.encode(UpperCamelCase__ ).latent_dist
if sample_posterior:
UpperCAmelCase = posterior.sample(generator=UpperCamelCase__ )
else:
UpperCAmelCase = posterior.mode()
UpperCAmelCase = self.decode(UpperCamelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
| 457
| 0
|
'''simple docstring'''
from itertools import permutations
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : tuple ):
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
a_ : List[Any] = [7, 11, 13, 17]
for i, test in enumerate(UpperCamelCase__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : int = 10 ):
"""simple docstring"""
return sum(
int("""""".join(map(UpperCamelCase__ , UpperCamelCase__ ) ) )
for num in permutations(range(UpperCamelCase__ ) )
if is_substring_divisible(UpperCamelCase__ ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 442
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowerCAmelCase_ : int = {'target_lang': 'fi', 'source_lang': 'en'}
lowerCAmelCase_ : str = '>>zh<<'
lowerCAmelCase_ : List[str] = 'Helsinki-NLP/'
if is_torch_available():
lowerCAmelCase_ : Dict = 'pt'
elif is_tf_available():
lowerCAmelCase_ : Union[str, Any] = 'tf'
else:
lowerCAmelCase_ : int = 'jax'
@require_sentencepiece
class SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
__magic_name__ : Dict = MarianTokenizer
__magic_name__ : Any = False
__magic_name__ : str = True
def lowercase_ ( self : Any ):
'''simple docstring'''
super().setUp()
a_ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a_ : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
a_ : List[str] = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
a_ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple , **lowercase__ : int ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def lowercase_ ( self : int , lowercase__ : int ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowercase_ ( self : List[str] ):
'''simple docstring'''
a_ : Optional[int] = """</s>"""
a_ : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
a_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : str = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
a_ : Any = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
a_ : str = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
a_ : Union[str, Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
a_ : Union[str, Any] = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : int = self.get_tokenizer()
a_ : Dict = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
a_ : List[str] = self.get_tokenizer()
a_ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
a_ : Optional[int] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowercase_ ( self : int ):
'''simple docstring'''
a_ : Tuple = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
a_ : Tuple = """Tämä on testi"""
a_ : Union[str, Any] = """This is a test"""
a_ : Union[str, Any] = [76, 7, 2047, 2]
a_ : Optional[int] = [69, 12, 11, 940, 2]
a_ : Optional[Any] = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : Optional[int] = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : str = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 442
| 1
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __A(lowerCAmelCase , lowerCAmelCase ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def __A(lowerCAmelCase , lowerCAmelCase ) -> str:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_UpperCamelCase = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )
_UpperCamelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
_UpperCamelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_UpperCamelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = dct.pop(lowerCAmelCase )
_UpperCamelCase = val
def __A(lowerCAmelCase ) -> List[str]:
"""simple docstring"""
if "handwritten" in checkpoint_url:
_UpperCamelCase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
_UpperCamelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def __A(lowerCAmelCase , lowerCAmelCase ) -> str:
"""simple docstring"""
_UpperCamelCase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase )
_UpperCamelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_UpperCamelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
_UpperCamelCase = 1_0_2_4
_UpperCamelCase = 4_0_9_6
_UpperCamelCase = 2_4
_UpperCamelCase = 1_6
_UpperCamelCase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase = False
_UpperCamelCase = """relu"""
_UpperCamelCase = 1_0_2_4
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
# load HuggingFace model
_UpperCamelCase = ViTModel(lowerCAmelCase , add_pooling_layer=lowerCAmelCase )
_UpperCamelCase = TrOCRForCausalLM(lowerCAmelCase )
_UpperCamelCase = VisionEncoderDecoderModel(encoder=lowerCAmelCase , decoder=lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
_UpperCamelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" , check_hash=lowerCAmelCase )["""model"""]
_UpperCamelCase = create_rename_keys(lowerCAmelCase , lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_UpperCamelCase = state_dict.pop(lowerCAmelCase )
if key.startswith("""decoder""" ) and "output_projection" not in key:
_UpperCamelCase = val
else:
_UpperCamelCase = val
# load state dict
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image
_UpperCamelCase = ViTImageProcessor(size=encoder_config.image_size )
_UpperCamelCase = RobertaTokenizer.from_pretrained("""roberta-large""" )
_UpperCamelCase = TrOCRProcessor(lowerCAmelCase , lowerCAmelCase )
_UpperCamelCase = processor(images=prepare_img(lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values
# verify logits
_UpperCamelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_UpperCamelCase = model(pixel_values=lowerCAmelCase , decoder_input_ids=lowerCAmelCase )
_UpperCamelCase = outputs.logits
_UpperCamelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 717
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger()
@dataclass
class lowerCAmelCase__ :
UpperCamelCase_ : nn.Module
UpperCamelCase_ : List[nn.Module] = field(default_factory=__lowercase )
UpperCamelCase_ : list = field(default_factory=__lowercase )
def A_ ( self , a , a , a ) -> str:
'''simple docstring'''
_UpperCamelCase = len(list(m.modules() ) ) == 1 or isinstance(a , nn.Convad ) or isinstance(a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a )
def __call__( self , a ) -> Optional[int]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a )
[x.remove() for x in self.handles]
return self
@property
def A_ ( self ) -> Optional[int]:
'''simple docstring'''
return list(filter(lambda a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCAmelCase__ :
UpperCamelCase_ : nn.Module
UpperCamelCase_ : nn.Module
UpperCamelCase_ : int = 0
UpperCamelCase_ : List = field(default_factory=__lowercase )
UpperCamelCase_ : List = field(default_factory=__lowercase )
def __call__( self , a ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = Tracker(self.dest )(a ).parametrized
_UpperCamelCase = Tracker(self.src )(a ).parametrized
_UpperCamelCase = list(filter(lambda a : type(a ) not in self.src_skip , a ) )
_UpperCamelCase = list(filter(lambda a : type(a ) not in self.dest_skip , a ) )
if len(a ) != len(a ):
raise Exception(
F'Numbers of operations are different. Source module has {len(a )} operations while'
F' destination module has {len(a )}.' )
for dest_m, src_m in zip(a , a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True ) -> Optional[Any]:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
_UpperCamelCase = timm.create_model(lowerCAmelCase , pretrained=lowerCAmelCase ).eval()
_UpperCamelCase = ResNetForImageClassification(lowerCAmelCase ).eval()
_UpperCamelCase = ModuleTransfer(src=lowerCAmelCase , dest=lowerCAmelCase )
_UpperCamelCase = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(lowerCAmelCase )
assert torch.allclose(from_model(lowerCAmelCase ) , our_model(lowerCAmelCase ).logits ), "The model logits don't match the original one."
_UpperCamelCase = F'resnet{"-".join(name.split("resnet" ) )}'
print(lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase , )
# we can use the convnext one
_UpperCamelCase = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __A(lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = """imagenet-1k-id2label.json"""
_UpperCamelCase = 1_0_0_0
_UpperCamelCase = (1, num_labels)
_UpperCamelCase = """huggingface/label-files"""
_UpperCamelCase = num_labels
_UpperCamelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
_UpperCamelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
_UpperCamelCase = partial(lowerCAmelCase , num_labels=lowerCAmelCase , idalabel=lowerCAmelCase , labelaid=lowerCAmelCase )
_UpperCamelCase = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(lowerCAmelCase , names_to_config[model_name] , lowerCAmelCase , lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 202
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.