| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
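Each row below pairs a source-code snippet with a style context. A minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library (the repository id is a placeholder, and the exact semantics of `label` are not documented on this card):

from datasets import load_dataset

# Placeholder repo id: substitute the actual repository for this dataset.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:200])      # a source snippet (86 to 54.5k chars)
print(row["code_codestyle"])  # integer style id in [0, 371]
print(row["label"])           # binary label (0 or 1)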
"""A projected adaptive log-softmax head, as used by Transformer-XL."""
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
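A minimal, untested usage sketch for the module above (the hyperparameters are illustrative, not taken from any checkpoint):

# 1000-token vocabulary split into a 200-token head plus two tail clusters.
adaptive_softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[200, 600])
hidden = torch.randn(4, 8, 64)           # (batch, seq_len, d_proj)
labels = torch.randint(0, 1000, (4, 8))  # next-token targets
nll = adaptive_softmax(hidden, labels)   # per-token negative log-likelihoods
print(nll.shape)                         # torch.Size([28]): batch * (seq_len - 1) after the shift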
---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
---
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Given any two of voltage, current and power (with the unknown one passed as 0),
    return the missing quantity using P = V * I.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
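For reference, two sanity checks of `electric_power` (the results follow directly from P = V * I; the original doctests were stripped from this copy):

print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=4, power=0))  # result(name='power', value=8.0)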
---
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
---
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
---
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
---
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = '▁'
__lowercase = {'vocab_file': 'prophetnet.tokenizer'}
__lowercase = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
__lowercase = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
__lowercase = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def lowercase ( A_ )-> int:
'''simple docstring'''
a : int = collections.OrderedDict()
with open(snake_case_ , "r" , encoding="utf-8" ) as reader:
a : Optional[int] = reader.readlines()
for index, token in enumerate(snake_case_ ):
a : str = token.rstrip("\n" )
a : str = index
return vocab
class _A ( _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase : int = VOCAB_FILES_NAMES
UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : Dict="[SEP]" , __UpperCAmelCase : Optional[int]="[SEP]" , __UpperCAmelCase : Optional[int]="[SEP]" , __UpperCAmelCase : List[str]="[UNK]" , __UpperCAmelCase : Union[str, Any]="[PAD]" , __UpperCAmelCase : str="[CLS]" , __UpperCAmelCase : Optional[Any]="[MASK]" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Tuple , ):
a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
a : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(_lowercase))
a : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
a : str = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
a : Tuple = f'''[unused{i}]'''
a : Dict = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
a : int = 12
a : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_lowercase)
def __getstate__( self : Dict):
a : str = self.__dict__.copy()
a : Any = None
return state
def __setstate__( self : List[Any] , __UpperCAmelCase : List[Any]):
a : int = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
a : str = {}
a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __snake_case ( self : Optional[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is None:
return ([0] * len(_lowercase)) + [1]
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1]
def __snake_case ( self : Optional[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None):
a : Dict = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def __snake_case ( self : Optional[Any]):
return len(self.sp_model) + self.fairseq_offset
def __snake_case ( self : str):
a : Any = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __snake_case ( self : str , __UpperCAmelCase : str):
return self.sp_model.encode(_lowercase , out_type=_lowercase)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Optional[Any]):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a : Optional[int] = self.sp_model.PieceToId(_lowercase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case ( self : Dict , __UpperCAmelCase : List[str]):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Any):
a : List[str] = "".join(_lowercase).replace(_lowercase , " ").strip()
return out_string
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None):
if not os.path.isdir(_lowercase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
a : Any = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , "wb") as fi:
a : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
a : Optional[Any] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
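A minimal usage sketch for the tokenizer above (the checkpoint id comes from PRETRAINED_VOCAB_FILES_MAP in this file; the printed shape is illustrative):

from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
enc = tokenizer("Hello world", return_tensors="pt")
print(enc["input_ids"].shape)  # (1, seq_len); build_inputs_with_special_tokens appends a trailing [SEP]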
---
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on
    return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
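The entry point can also be driven directly from Python rather than the CLI; a sketch using the argparse default model name (the dump path is a placeholder):

convert_vit_checkpoint(
    vit_name="vit_base_r50_s16_384",
    pytorch_dump_folder_path="./vit_hybrid_dump",  # placeholder output directory
    push_to_hub=False,
)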
---
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
_UpperCamelCase : Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
_UpperCamelCase : Any = [0, 25, 50]
_UpperCamelCase : List[Any] = [25, 50, 75]
_UpperCamelCase : str = fuzz.membership.trimf(X, abca)
_UpperCamelCase : str = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
_UpperCamelCase : List[str] = np.ones(75)
_UpperCamelCase : Optional[Any] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
_UpperCamelCase : Dict = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
_UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
_UpperCamelCase : Tuple = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
_UpperCamelCase : int = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
_UpperCamelCase : Union[str, Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
_UpperCamelCase : Optional[Any] = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
_UpperCamelCase : int = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
_UpperCamelCase : Optional[int] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
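As a quick numeric check of the operators used above: skfuzzy's binary set operators take (universe, membership) pairs and return a (universe, membership) tuple, with union and intersection reducing to elementwise max and min. A small sanity-check sketch, not from the original file:

import numpy as np
import skfuzzy as fuzz

x = np.array([0.0, 1.0, 2.0])
mf_a = np.array([0.2, 0.5, 0.9])
mf_b = np.array([0.6, 0.4, 0.1])
_, mf_union = fuzz.fuzzy_or(x, mf_a, x, mf_b)   # elementwise max -> [0.6, 0.5, 0.9]
_, mf_inter = fuzz.fuzzy_and(x, mf_a, x, mf_b)  # elementwise min -> [0.2, 0.4, 0.1]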
---
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : int = args.pruning_method
lowercase__ : Tuple = args.threshold
lowercase__ : str = args.model_name_or_path.rstrip('/' )
lowercase__ : List[Any] = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
lowercase__ : Optional[Any] = torch.load(os.path.join(_lowerCAmelCase , 'pytorch_model.bin' ) )
lowercase__ : List[str] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase__ : Tuple = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowercase__ : List[str] = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
lowercase__ : Optional[Any] = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
lowercase__ : Optional[Any] = MagnitudeBinarizer.apply(inputs=_lowerCAmelCase , threshold=_lowerCAmelCase )
lowercase__ : Optional[int] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowercase__ : Optional[Any] = name[:-6]
lowercase__ : Optional[int] = model[f"""{prefix_}mask_scores"""]
lowercase__ : Any = TopKBinarizer.apply(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[Any] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowercase__ : Any = name[:-6]
lowercase__ : Optional[Any] = model[f"""{prefix_}mask_scores"""]
lowercase__ : Tuple = ThresholdBinarizer.apply(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[str] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowercase__ : Union[str, Any] = name[:-6]
lowercase__ : Optional[int] = model[f"""{prefix_}mask_scores"""]
lowercase__ , lowercase__ : Tuple = -0.1, 1.1
lowercase__ : Optional[Any] = torch.sigmoid(_lowerCAmelCase )
lowercase__ : Optional[Any] = s * (r - l) + l
lowercase__ : Optional[Any] = s_bar.clamp(min=0.0 , max=1.0 )
lowercase__ : Union[str, Any] = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowercase__ : Union[str, Any] = os.path.join(
os.path.dirname(_lowerCAmelCase ) , f"""bertarized_{os.path.basename(_lowerCAmelCase )}""" )
if not os.path.isdir(_lowerCAmelCase ):
shutil.copytree(_lowerCAmelCase , _lowerCAmelCase )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()
    main(args)
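`main` can also be invoked programmatically with an `argparse.Namespace`; a sketch with placeholder paths:

from argparse import Namespace

main(
    Namespace(
        pruning_method="topK",
        threshold=0.1,                             # keep the top 10% of weights per layer
        model_name_or_path="./fine_pruned_model",  # placeholder checkpoint folder
        target_model_path=None,                    # defaults to ./bertarized_fine_pruned_model
    )
)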
---
from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )


class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
---
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 281
| 1
|
from __future__ import annotations
import numpy as np
def _UpperCAmelCase (UpperCamelCase__ : list[float] ):
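# np.maximum(0, x) applies max(0, x_i) element-wise, so scalars, lists, and ndarrays all work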
return np.maximum(0 , UpperCamelCase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 11
|
"""simple docstring"""
import os
from collections.abc import Iterator
def __SCREAMING_SNAKE_CASE ( A_ = "." ):
for dir_path, dir_names, filenames in os.walk(A_ ):
lowerCAmelCase__ : str = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A_ )[1] in (".py", ".ipynb"):
yield os.path.join(A_ , A_ ).lstrip('''./''' )
def __SCREAMING_SNAKE_CASE ( A_ ):
return f'{i * " "}*' if i else "\n##"
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A_ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A_ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def __SCREAMING_SNAKE_CASE ( A_ = "." ):
lowerCAmelCase__ : Any = ''''''
for filepath in sorted(good_file_paths(A_ ) ):
lowerCAmelCase__ ,lowerCAmelCase__ : str = os.path.split(A_ )
if filepath != old_path:
lowerCAmelCase__ : str = print_path(A_ , A_ )
lowerCAmelCase__ : str = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase__ : Union[str, Any] = f'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
lowerCAmelCase__ : List[str] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f'{md_prefix(A_ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 106
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = '''ZinengTang/tvlt-base'''
UpperCAmelCase = tempfile.mkdtemp()
def a__( self : Dict , **lowerCAmelCase : Dict )-> int:
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint , **_SCREAMING_SNAKE_CASE )
def a__( self : Optional[int] , **lowerCAmelCase : List[str] )-> Tuple:
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_SCREAMING_SNAKE_CASE )
def a__( self : List[str] )-> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__( self : str )-> Any:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = TvltProcessor(image_processor=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = TvltProcessor(image_processor=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase = np.ones([12000] )
UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
UpperCAmelCase = processor(audio=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__( self : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = TvltProcessor(image_processor=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase = np.ones([3, 224, 224] )
UpperCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
UpperCAmelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = TvltProcessor(image_processor=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase = np.ones([12000] )
UpperCAmelCase = np.ones([3, 224, 224] )
UpperCAmelCase = processor(audio=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_feature_extractor()
UpperCAmelCase = TvltProcessor(image_processor=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 355
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 91
| 0
|
from importlib import import_module
from .logging import get_logger
UpperCAmelCase__ = get_logger(__name__)
class lowercase_ :
'''simple docstring'''
def __init__( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=None ) ->int:
"""simple docstring"""
a = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , __UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
a = module._original_module if isinstance(__UpperCAmelCase , _PatchedModuleObj ) else module
class lowercase_ :
'''simple docstring'''
__snake_case = []
def __init__( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=None ) ->Union[str, Any]:
"""simple docstring"""
a = obj
a = target
a = new
a = target.split('''.''' )[0]
a = {}
a = attrs or []
def __enter__( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
*a , a = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__UpperCAmelCase ) ):
try:
a = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a = getattr(self.obj , __UpperCAmelCase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows patching renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
a = obj_attr
# patch at top level
setattr(self.obj , __UpperCAmelCase , _PatchedModuleObj(__UpperCAmelCase , attrs=self.attrs ) )
a = getattr(self.obj , __UpperCAmelCase )
# construct lower-level patches
for key in submodules[i + 1 :]:
setattr(__UpperCAmelCase , __UpperCAmelCase , _PatchedModuleObj(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , attrs=self.attrs ) )
a = getattr(__UpperCAmelCase , __UpperCAmelCase )
# finally set the target attribute
setattr(__UpperCAmelCase , __UpperCAmelCase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a = getattr(import_module('''.'''.join(__UpperCAmelCase ) ) , __UpperCAmelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows patching renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , __UpperCAmelCase ) is attr_value:
a = getattr(self.obj , __UpperCAmelCase )
setattr(self.obj , __UpperCAmelCase , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a = globals()['''__builtins__'''][target_attr]
setattr(self.obj , __UpperCAmelCase , self.new )
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self : List[str] , *__UpperCAmelCase : Optional[int] ) ->Tuple:
"""simple docstring"""
for attr in list(self.original ):
setattr(self.obj , __UpperCAmelCase , self.original.pop(__UpperCAmelCase ) )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
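# A minimal usage sketch (the module name `mymod` is hypothetical, and `patch_submodule`
# stands in for the class defined above, shown here with a readable name):
#
#     with patch_submodule(mymod, "os.path.join", lambda *parts: "/".join(parts)):
#         mymod.join("a", "b")  # patched even if imported as `from os.path import join`
#
# The final two methods act as start()/stop(), providing the same patching without a
# `with` block and tracking every started patch in `_active_patches` so it can be undone.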
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_UpperCAmelCase = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_UpperCAmelCase = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
_UpperCAmelCase = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
_UpperCAmelCase = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
import nltk
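# wordnet is needed for METEOR's synonym matching; newer NLTK versions additionally
# require the punkt tokenizer and the omw-1.4 wordnet data downloaded below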
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=0.9 , lowercase=3 , lowercase=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
A_ : List[Any] = [
meteor_score.single_meteor_score(
word_tokenize(lowercase ) , word_tokenize(lowercase ) , alpha=lowercase , beta=lowercase , gamma=lowercase )
for ref, pred in zip(lowercase , lowercase )
]
else:
A_ : Optional[Any] = [
meteor_score.single_meteor_score(lowercase , lowercase , alpha=lowercase , beta=lowercase , gamma=lowercase )
for ref, pred in zip(lowercase , lowercase )
]
return {"meteor": np.mean(lowercase )}
| 140
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A ( __UpperCAmelCase ):
__snake_case = 'trocr'
__snake_case = ['past_key_values']
__snake_case = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self, UpperCamelCase__=5_0265, UpperCamelCase__=1024, UpperCamelCase__=12, UpperCamelCase__=16, UpperCamelCase__=4096, UpperCamelCase__="gelu", UpperCamelCase__=512, UpperCamelCase__=0.1, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=2, UpperCamelCase__=0.02, UpperCamelCase__=0.0, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=2, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = d_model
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = init_std
lowerCAmelCase_ = decoder_layerdrop
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = scale_embedding
lowerCAmelCase_ = use_learned_position_embeddings
lowerCAmelCase_ = layernorm_embedding
super().__init__(
pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, decoder_start_token_id=UpperCamelCase__, **UpperCamelCase__, )
| 167
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class A ( __UpperCAmelCase ):
__snake_case = ['pixel_values']
def __init__( self, UpperCamelCase__ = True, UpperCamelCase__ = 32, UpperCamelCase__=PILImageResampling.BILINEAR, UpperCamelCase__ = True, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = size_divisor
lowerCAmelCase_ = resample
super().__init__(**UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(UpperCamelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
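# e.g. with size_divisor=32, a 300x513 image is resized to 288x512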
lowerCAmelCase_ = height // size_divisor * size_divisor
lowerCAmelCase_ = width // size_divisor * size_divisor
lowerCAmelCase_ = resize(UpperCamelCase__, (new_h, new_w), resample=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
return image
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__ ):
"""simple docstring"""
return rescale(image=UpperCamelCase__, scale=UpperCamelCase__, data_format=UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=None, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__ = ChannelDimension.FIRST, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowerCAmelCase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(UpperCamelCase__ ) for img in images]
if do_resize:
lowerCAmelCase_ = [self.resize(UpperCamelCase__, size_divisor=UpperCamelCase__, resample=UpperCamelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(UpperCamelCase__, scale=1 / 255 ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(UpperCamelCase__, UpperCamelCase__ ) for image in images]
lowerCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__ )
| 167
| 1
|
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _A ( lowercase , lowercase=() , lowercase=None , lowercase="no" , lowercase="29500" ):
"""simple docstring"""
a =False
a =False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
a =True
elif "IPython" in sys.modules:
a ='''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
a =PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , lowercase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initialize an '''
'''`Accelerator`.''' )
if num_processes is None:
a =8
a =PrepareForLaunch(lowercase , distributed_type='''TPU''' )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(lowercase , args=lowercase , nprocs=lowercase , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*lowercase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initialize an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowercase , master_addr='''127.0.0.1''' , master_port=lowercase , mixed_precision=lowercase ):
a =PrepareForLaunch(lowercase , distributed_type='''MULTI_GPU''' )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(lowercase , args=lowercase , nprocs=lowercase , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
a ='''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*lowercase )
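# A minimal usage sketch (`notebook_launcher` is the public accelerate name for the
# function defined above; the training function and its arguments are placeholders):
#
#     def training_function(model, dataloader):
#         ...  # construct the `Accelerator` *inside* this function
#
#     notebook_launcher(training_function, args=(model, dataloader), num_processes=2)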
def _A ( lowercase , lowercase=() , lowercase=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=lowercase , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
a =PrepareForLaunch(lowercase , debug=lowercase )
start_processes(lowercase , args=lowercase , nprocs=lowercase , start_method='''fork''' )
| 81
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A =logging.get_logger(__name__)
def a ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict ):
'''simple docstring'''
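# computes all pairwise squared distances d[i, j] = ||a_i - b_j||^2 via the
# expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2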
__UpperCAmelCase : List[str] = b.T
__UpperCAmelCase : Any = np.sum(np.square(_UpperCAmelCase ) , axis=1 )
__UpperCAmelCase : int = np.sum(np.square(_UpperCAmelCase ) , axis=0 )
__UpperCAmelCase : Optional[int] = np.matmul(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase : List[str] = aa[:, None] - 2 * ab + ba[None, :]
return d
def a ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = x.reshape(-1 , 3 )
__UpperCAmelCase : Optional[int] = squared_euclidean_distance(_UpperCAmelCase , _UpperCAmelCase )
return np.argmin(_UpperCAmelCase , axis=1 )
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ["""pixel_values"""]
def __init__( self : str , a_ : Optional[Union[List[List[int]], np.ndarray]] = None , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : bool = True , **a_ : List[str] , ):
'''simple docstring'''
super().__init__(**a_ )
__UpperCAmelCase : Optional[int] = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
__UpperCAmelCase : List[str] = get_size_dict(a_ )
__UpperCAmelCase : str = np.array(a_ ) if clusters is not None else None
__UpperCAmelCase : Dict = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = resample
__UpperCAmelCase : Tuple = do_normalize
__UpperCAmelCase : Optional[int] = do_color_quantize
def snake_case__ ( self : Optional[Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
a_ , size=(size['''height'''], size['''width''']) , resample=a_ , data_format=a_ , **a_ )
def snake_case__ ( self : Tuple , a_ : np.ndarray , a_ : Optional[Union[str, ChannelDimension]] = None , ):
'''simple docstring'''
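# scales pixel values from [0, 255] down to [0, 2], then shifts them to [-1, 1]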
__UpperCAmelCase : Dict = rescale(image=a_ , scale=1 / 1_2_7.5 , data_format=a_ )
__UpperCAmelCase : Union[str, Any] = image - 1
return image
def snake_case__ ( self : int , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : Optional[bool] = None , a_ : Optional[Union[List[List[int]], np.ndarray]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **a_ : Any , ):
'''simple docstring'''
__UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : List[str] = size if size is not None else self.size
__UpperCAmelCase : Any = get_size_dict(a_ )
__UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample
__UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : int = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase : Optional[int] = clusters if clusters is not None else self.clusters
__UpperCAmelCase : Any = np.array(a_ )
__UpperCAmelCase : Optional[int] = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase : List[Any] = [to_numpy_array(a_ ) for image in images]
if do_resize:
__UpperCAmelCase : List[str] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_normalize:
__UpperCAmelCase : Dict = [self.normalize(image=a_ ) for image in images]
if do_color_quantize:
__UpperCAmelCase : int = [to_channel_dimension_format(a_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase : List[str] = np.array(a_ )
__UpperCAmelCase : Dict = color_quantize(a_ , a_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase : Any = images.shape[0]
__UpperCAmelCase : Any = images.reshape(a_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase : List[Any] = list(a_ )
else:
__UpperCAmelCase : int = [to_channel_dimension_format(a_ , a_ ) for image in images]
__UpperCAmelCase : int = {'''input_ids''': images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 226
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "img_encoder.pos_embed" in name:
A_ : List[str] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
A_ : List[Any] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
A_ : List[str] = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
A_ : int = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
A_ : Dict = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
A_ : Union[str, Any] = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
A_ : Dict = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
A_ : Optional[Any] = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
A_ : Dict = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
A_ : Any = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
A_ : Union[str, Any] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
A_ : Optional[int] = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
A_ : List[Any] = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
A_ : str = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
A_ : Tuple = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
A_ : List[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
A_ : Union[str, Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
A_ : Optional[int] = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
A_ : Union[str, Any] = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
A_ : Union[str, Any] = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
A_ : List[str] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
A_ : Any = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
A_ : Any = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
A_ : Optional[int] = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ : Tuple = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "qkv" in key:
# weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
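# the fused qkv weight stacks q, k and v along dim 0, so rows [:dim], [dim:2*dim]
# and [2*dim:] (and the matching bias slices) are separated below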
A_ : List[Any] = key.split(""".""" )
A_ : Union[str, Any] = int(key_split[2] ), int(key_split[4] )
A_ : Dict = config.vision_config.hidden_size
if "weight" in key:
A_ : Union[str, Any] = val[:dim, :]
A_ : Tuple = val[dim : dim * 2, :]
A_ : Union[str, Any] = val[-dim:, :]
else:
A_ : Union[str, Any] = val[:dim]
A_ : str = val[dim : dim * 2]
A_ : Optional[int] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ : str = key.split(""".""" )
A_ : Optional[Any] = int(key_split[3] )
A_ : str = config.text_config.hidden_size
if "weight" in key:
A_ : Optional[Any] = val[:dim, :]
A_ : Dict = val[
dim : dim * 2, :
]
A_ : Optional[int] = val[-dim:, :]
else:
A_ : Optional[int] = val[:dim]
A_ : Optional[Any] = val[dim : dim * 2]
A_ : Tuple = val[-dim:]
else:
A_ : List[Any] = rename_key(__SCREAMING_SNAKE_CASE )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A_ : int = val.squeeze_()
else:
A_ : List[Any] = val
return orig_state_dict
def a ( ):
'''simple docstring'''
A_ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ : Union[str, Any] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="groupvit-gcc-yfcc" , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[Any] = GroupViTConfig()
A_ : List[str] = GroupViTModel(__SCREAMING_SNAKE_CASE ).eval()
A_ : int = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )["model"]
A_ : Optional[int] = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
A_ : Any = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__SCREAMING_SNAKE_CASE ) == 0)
# verify result
A_ : List[Any] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
A_ : Union[str, Any] = prepare_img()
A_ : Dict = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
with torch.no_grad():
A_ : Dict = model(**__SCREAMING_SNAKE_CASE )
if model_name == "groupvit-gcc-yfcc":
A_ : Any = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
A_ : List[str] = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , __SCREAMING_SNAKE_CASE , atol=1E-3 )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print("""Successfully saved processor and model to""" , __SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(__SCREAMING_SNAKE_CASE , organization="""nielsr""" )
model.push_to_hub(__SCREAMING_SNAKE_CASE , organization="""nielsr""" )
if __name__ == "__main__":
lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 370
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCAmelCase :
@property
def _a (self ):
return self.get_dummy_input()
@property
def _a (self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def _a (self , lowercase=True , lowercase=False , lowercase=False , lowercase=False , ):
A_ : List[str] = 4
A_ : int = 32
A_ : Optional[int] = (32, 32)
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : int = torch.device(lowercase )
A_ : int = (batch_size, num_channels) + sizes
A_ : Optional[int] = randn_tensor(lowercase , generator=lowercase , device=lowercase )
A_ : Union[str, Any] = {"""hidden_states""": hidden_states}
if include_temb:
A_ : str = 128
A_ : List[Any] = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
A_ : List[str] = torch.manual_seed(1 )
A_ : int = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
A_ : List[str] = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
A_ : Dict = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def _a (self ):
A_ : Tuple = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A_ : Any = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self , lowercase ):
A_, A_ : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
A_ : int = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
A_ : List[str] = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ : int = output[0, -1, -3:, -3:]
A_ : List[Any] = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def _a (self ):
A_, A_ : Tuple = self.prepare_init_args_and_inputs_for_common()
A_ : List[str] = self.block_class(**lowercase )
model.to(lowercase )
model.train()
A_ : Any = model(**lowercase )
if isinstance(lowercase , lowercase ):
A_ : str = output[0]
A_ : Union[str, Any] = torch.device(lowercase )
A_ : Tuple = randn_tensor(output.shape , device=lowercase )
A_ : List[str] = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward()
| 135
| 0
|
def lowerCAmelCase_ ( _snake_case : str ) -> list:
'''simple docstring'''
if n_term == "":
return []
__magic_name__ : list = []
for temp in range(int(_snake_case ) ):
series.append(F'''1/{temp + 1}''' if series else "1" )
return series
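# e.g. an input of "3" yields ['1', '1/2', '1/3']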
if __name__ == "__main__":
snake_case : Tuple = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 281
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : List[str] = {"vocab_file": "spiece.model"}
snake_case : List[str] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
snake_case : Tuple = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
snake_case : List[str] = "▁"
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
__magic_name__ : str = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
__magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__magic_name__ : Dict = do_lower_case
__magic_name__ : Tuple = remove_space
__magic_name__ : Union[str, Any] = keep_accents
__magic_name__ : Tuple = vocab_file
__magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__magic_name__ : List[str] = self.__dict__.copy()
__magic_name__ : Any = None
return state
def __setstate__( self , _a ):
__magic_name__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__magic_name__ : str = {}
__magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _a ):
if self.remove_space:
__magic_name__ : List[Any] = " ".join(inputs.strip().split() )
else:
__magic_name__ : str = inputs
__magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__magic_name__ : str = unicodedata.normalize("NFKD" , _a )
__magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__magic_name__ : int = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = self.preprocess_text(_a )
__magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
__magic_name__ : Any = []
for piece in pieces:
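# a piece like "2000," (digits followed by a comma) is re-encoded below so the
# trailing comma becomes its own token and the underline marker stays consistent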
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__magic_name__ : List[str] = cur_pieces[1:]
else:
__magic_name__ : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.IdToPiece(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Any = []
__magic_name__ : Union[str, Any] = ""
__magic_name__ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__magic_name__ : List[Any] = True
__magic_name__ : Optional[int] = []
else:
current_sub_tokens.append(_a )
__magic_name__ : Optional[Any] = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : List[str] = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[int] = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : List[str] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
__magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 281
| 1
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=16 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=2 , lowerCamelCase=32 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=30 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = d_model
__a = d_model
__a = decoder_layers
__a = decoder_layers
__a = decoder_ffn_dim
__a = decoder_attention_heads
__a = decoder_attention_heads
__a = eos_token_id
__a = bos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = use_cache
__a = max_position_embeddings
__a = None
__a = decoder_seq_length
__a = 2
__a = 1
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__a = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = True
__a = TrOCRDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
__a = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a = outputs["past_key_values"]
# create hypothetical next token and extend next_input_ids
__a = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the new token to input_ids to form next_input_ids
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_snake_case : Optional[int] = (TrOCRForCausalLM,) if is_torch_available() else ()
_snake_case : str = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
_snake_case : Optional[Any] = True
_snake_case : Any = False
def a__ ( self ):
__a = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase )
__a = ConfigTester(self , config_class=lowerCamelCase )
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
pass
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase )
def a__ ( self ):
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def a__ ( self ):
pass
| 355
|
"""simple docstring"""
def _lowerCamelCase( a , a , a , a ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__a = mf_knapsack(i - 1 , a , a , a )
else:
__a = max(
mf_knapsack(i - 1 , a , a , a ) , mf_knapsack(i - 1 , a , a , j - wt[i - 1] ) + val[i - 1] , )
__a = val
return f[i][j]
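# recurrence used above: f[i][j] = f[i - 1][j] if item i does not fit,
# else max(f[i - 1][j], f[i - 1][j - wt[i - 1]] + val[i - 1])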
def _lowerCamelCase( a , a , a , a ):
__a = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
__a = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
__a = dp[i - 1][w_]
return dp[n][w_], dp
def _lowerCamelCase( a , a , a ):
if not (isinstance(a , (list, tuple) ) and isinstance(a , (list, tuple) )):
raise ValueError(
"Both the weights and values vectors must be either lists or tuples" )
__a = len(a )
if num_items != len(a ):
__a = (
"The number of weights must be the same as the number of values.\n"
F"But got {num_items} weights and {len(a )} values"
)
raise ValueError(a )
for i in range(a ):
if not isinstance(wt[i] , a ):
__a = (
"All weights must be integers but got weight of "
F"type {type(wt[i] )} at index {i}"
)
raise TypeError(a )
__a , __a = knapsack(a , a , a , a )
__a = set()
_construct_solution(a , a , a , a , a )
return optimal_val, example_optional_set
def _lowerCamelCase( a , a , a , a , a ):
# For the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i - 1, j),
# where i - 1 means considering only the previous items at the given maximum weight.
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(a , a , i - 1 , a , a )
else:
optimal_set.add(a )
_construct_solution(a , a , i - 1 , j - wt[i - 1] , a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = [3, 2, 4, 4]
SCREAMING_SNAKE_CASE__:List[str] = [4, 3, 2, 3]
SCREAMING_SNAKE_CASE__:List[str] = 4
SCREAMING_SNAKE_CASE__:List[str] = 6
SCREAMING_SNAKE_CASE__:Optional[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:Optional[int] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example consists of items 3 and 4
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:Optional[Any] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 268
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : List[str] = "▁"
lowerCamelCase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
lowerCamelCase : Tuple = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
lowerCamelCase : str = {
"facebook/mbart-large-50-one-to-many-mmt": 1_0_2_4,
}
# fmt: off
lowerCamelCase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A__ ( A__ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = ['input_ids', 'attention_mask']
A__ = []
A__ = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
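# --- Editor's usage sketch (not part of the original file) -------------------
# Encoding a sentence with the tokenizer above; the checkpoint is the one
# referenced in PRETRAINED_VOCAB_FILES_MAP and is fetched from the Hub on
# first use.
if __name__ == "__main__":
    from transformers import MBart50Tokenizer

    tokenizer = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    encoded = tokenizer("Hello, world!", return_tensors="pt")
    # ids start with the en_XX language code and end with </s>, per
    # set_src_lang_special_tokens above
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"][0].tolist()))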
| 47
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

            generation_config = GenerationConfig.from_model_config(new_config)
            assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class GenerationConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
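# --- Editor's usage sketch (not part of the original file) -------------------
# The save/load round trip the tests above exercise, in miniature.
if __name__ == "__main__":
    sketch_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=64)
    sketch_config.save_pretrained("./gen-config")  # writes gen-config/generation_config.json
    reloaded = GenerationConfig.from_pretrained("./gen-config")
    assert reloaded.temperature == 0.7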
| 91
| 0
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCAmelCase__ : List[str] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCAmelCase__ : List[str] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowerCAmelCase__ : Optional[Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
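# --- Editor's usage sketch (not part of the original file) -------------------
# Making the WER arithmetic concrete with the compute_measures import above;
# the example pair matches the metric docstring (one substitution out of four
# reference words).
measures = compute_measures("this is the reference", "this is the prediction")
# WER = (S + D + I) / (S + D + C)
wer_by_hand = (measures["substitutions"] + measures["deletions"] + measures["insertions"]) / (
    measures["substitutions"] + measures["deletions"] + measures["hits"]
)
assert wer_by_hand == 0.25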
| 37
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 37
| 1
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
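# --- Editor's usage sketch (not part of the original file) -------------------
# A non-interactive round trip through the cipher above; the keyword "Marvin"
# is illustrative. Note the output is upper-cased by design.
cipher_map = create_cipher_map("Marvin")
secret = encipher("Hello World", cipher_map)
assert decipher(secret, cipher_map) == "HELLO WORLD"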
| 167
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : Tuple = tempfile.mkdtemp()
# fmt: off
A_ : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
A_ : Tuple = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
A_ : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
A_ : Tuple = {'''unk_token''': '''<unk>'''}
A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
A_ : str = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
A_ : str = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def a_ ( self : Any , **_lowerCamelCase : Dict ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : Dict , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : List[str] , **_lowerCamelCase : List[Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        """Creates a list of PIL images with random uint8 pixel values."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def a_ ( self : List[str] ):
"""simple docstring"""
A_ : int = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer()
A_ : Optional[Any] = self.get_image_processor()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Dict = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def a_ ( self : int ):
"""simple docstring"""
A_ : List[str] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : Optional[int] = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a_ ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : int = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : int = processor(text=_lowerCamelCase )
A_ : Any = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : str ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[Any] = self.get_tokenizer()
A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = '''lower newer'''
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Dict = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : int = self.get_tokenizer()
A_ : Any = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Tuple = self.prepare_image_inputs()
A_ : Optional[int] = processor(images=_lowerCamelCase , visual_prompt=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Optional[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : List[str] = processor.batch_decode(_lowerCamelCase )
A_ : str = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
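# --- Editor's usage sketch (not part of the original file) -------------------
# The text+image pairing the tests above exercise, against a published CLIPSeg
# checkpoint; the checkpoint name is an assumption, as the tests only use
# local fixtures.
if __name__ == "__main__":
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["a glass"], images=[image], return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']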
| 167
| 1
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None ) -> List[str]:
"""simple docstring"""
a = None
if token is not None:
a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
a = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
a = requests.get(snake_case_, headers=snake_case_ ).json()
a = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
a = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(snake_case_ ):
a = requests.get(url + f"""&page={i + 2}""", headers=snake_case_ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None ) -> Union[str, Any]:
"""simple docstring"""
a = None
if token is not None:
a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
a = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
a = requests.get(snake_case_, headers=snake_case_ ).json()
a = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
a = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(snake_case_ ):
a = requests.get(url + f"""&page={i + 2}""", headers=snake_case_ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[str]:
"""simple docstring"""
a = None
if token is not None:
a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
a = requests.get(snake_case_, headers=snake_case_, allow_redirects=snake_case_ )
a = result.headers['''Location''']
a = requests.get(snake_case_, allow_redirects=snake_case_ )
a = os.path.join(snake_case_, f"""{artifact_name}.zip""" )
with open(snake_case_, '''wb''' ) as fp:
fp.write(response.content )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None ) -> int:
"""simple docstring"""
a = []
a = []
a = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
a = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a = line[: line.index(''': ''' )]
a = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
a = line[len('''FAILED ''' ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
a = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` """
f"""and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
''' problem.''' )
a = None
if job_name and job_links:
a = job_links.get(snake_case_, snake_case_ )
# A list with elements of the form (line of error, error, failed test)
a = [x + [y] + [job_link] for x, y in zip(snake_case_, snake_case_ )]
return result
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None ) -> Optional[int]:
"""simple docstring"""
a = []
a = [os.path.join(snake_case_, snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_, job_links=snake_case_ ) )
return errors
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None ) -> str:
"""simple docstring"""
a = Counter()
counter.update([x[1] for x in logs] )
a = counter.most_common()
a = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
a = dict(sorted(r.items(), key=lambda snake_case_ : item[1]["count"], reverse=snake_case_ ) )
return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None ) -> Any:
"""simple docstring"""
a = [(x[0], x[1], get_model(x[2] )) for x in logs]
a = [x for x in logs if x[2] is not None]
a = {x[2] for x in logs}
a = {}
for test in tests:
a = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a = counter.most_common()
a = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a = sum(error_counts.values() )
if n_errors > 0:
a = {'''count''': n_errors, '''errors''': error_counts}
a = dict(sorted(r.items(), key=lambda snake_case_ : item[1]["count"], reverse=snake_case_ ) )
return r
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
"""simple docstring"""
a = '''| no. | error | status |'''
a = '''|-:|:-|:-|'''
a = [header, sep]
for error in reduced_by_error:
a = reduced_by_error[error]['''count''']
a = f"""| {count} | {error[:1_0_0]} | |"""
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict:
"""simple docstring"""
a = '''| model | no. of errors | major error | count |'''
a = '''|-:|-:|-:|-:|'''
a = [header, sep]
for model in reduced_by_model:
a = reduced_by_model[model]['''count''']
a , a = list(reduced_by_model[model]['''errors'''].items() )[0]
a = f"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
UpperCamelCase__ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase__ : Union[str, Any] = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase__ : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase__ : Tuple = k.find(""" / """)
UpperCamelCase__ : Tuple = k[index + len(""" / """) :]
UpperCamelCase__ : Dict = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase__ : List[str] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase__ : List[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase__ : Tuple = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 330
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
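    # --- Editor's note (not part of the original file) ------------------
    # Shape bookkeeping for the windowing above: every sample is a
    # (look_back, 1) window and every target a flattened forward_days
    # horizon, matching the final Dense(forward_days) layer.
    assert x_train.shape[1:] == (look_back, 1)
    assert y_train.shape[1] == forward_days
    assert pred.shape[1] == forward_days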
| 330
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 105
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 135
| 0
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = mock.Mock()
a :int = 500
a :Optional[int] = {}
a :Tuple = HTTPError
a :Dict = {}
# Download this model to make sure it's in the cache.
a :Optional[Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=a_ ) as mock_head:
a :List[str] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = mock.Mock()
a :Optional[Any] = 500
a :str = {}
a :Tuple = HTTPError
a :str = {}
# Download this model to make sure it's in the cache.
a :int = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=a_ ) as mock_head:
a :Union[str, Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self ):
try:
a :str = tempfile.mktemp()
with open(a_ , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , a_ )
a :Union[str, Any] = AlbertTokenizer.from_pretrained(a_ )
finally:
os.remove(a_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , a_ )
a :Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
a :Optional[Any] = TOKEN
HfFolder.save_token(a_ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
a :List[str] = os.path.join(a_ , '''vocab.txt''' )
with open(a_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
a :Any = BertTokenizer(a_ )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
a :Union[str, Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a_ , repo_id='''test-tokenizer''' , push_to_hub=a_ , use_auth_token=self._token )
a :List[str] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def SCREAMING_SNAKE_CASE__ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
a :Dict = os.path.join(a_ , '''vocab.txt''' )
with open(a_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
a :Dict = BertTokenizer(a_ )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
a :Dict = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a_ , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=a_ , use_auth_token=self._token )
a :Optional[int] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a :Tuple = os.path.join(a_ , '''vocab.txt''' )
with open(a_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
a :Any = CustomTokenizer(a_ )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
a :Union[str, Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a :Dict = os.path.join(a_ , '''vocab.txt''' )
with open(a_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
a :List[Any] = BertTokenizerFast.from_pretrained(a_ )
bert_tokenizer.save_pretrained(a_ )
a :Tuple = CustomTokenizerFast.from_pretrained(a_ )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
a :List[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
a :Any = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=a_ , trust_remote_code=a_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class TrieTest(unittest.TestCase):
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = Trie()
a :Optional[Any] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a_ , ['''AB''', '''C'''] )
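# --- Editor's usage sketch (not part of the original file) -------------------
# The Trie under test, used the way a tokenizer does: registered special
# tokens are split out of raw text before BPE/sentencepiece ever sees them.
sketch_trie = Trie()
sketch_trie.add("[CLS]")
sketch_trie.add("[SEP]")
print(sketch_trie.split("[CLS] hello [SEP]"))  # ['[CLS]', ' hello ', '[SEP]']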
| 359
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
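# --- Editor's usage sketch (not part of the original file) -------------------
# PipelineTool instances are callable; this assumes the default checkpoints and
# the xvector speaker-embedding dataset are downloaded on first use.
if __name__ == "__main__":
    tool = TextToSpeechTool()
    waveform = tool("Hello, how are you?")  # 1-D float tensor of audio samples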
| 281
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 173
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        return 12
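# --- Usage sketch (added for illustration; assumes the OnnxConfig base accepts
# the model config positionally, as in the transformers ONNX exporter) --------
if __name__ == "__main__":
    config = DetrConfig()
    print(config.num_attention_heads )  # 8, alias of encoder_attention_heads
    print(config.hidden_size )          # 256, alias of d_model
    onnx_config = DetrOnnxConfig(config )
    print(onnx_config.inputs )          # pixel_values / pixel_mask axis mapping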
| 268
| 0
|
'''simple docstring'''
def max_product_subarray( numbers: list[int] ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError("numbers must be an iterable of integers" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
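# --- Usage sketch (added for illustration) -----------------------------------
# The classic instance [2, 3, -2, 4] has maximum subarray product 6 (from the
# prefix [2, 3]); two adjacent negatives can also combine, e.g. [-2, -3] -> 6.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4] ) == 6
    assert max_product_subarray([-2, -3] ) == 6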
| 111
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
"""simple docstring"""
    def __init__( self , args ):
        """simple docstring"""
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , input_modal ):
        """simple docstring"""
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
"""simple docstring"""
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        """simple docstring"""
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : int ):
"""simple docstring"""
return len(self.data )
    def __getitem__( self , index ):
        """simple docstring"""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies( self ):
        """simple docstring"""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
        return label_freqs
def collate_fn( batch ):
    lens = [len(row["sentence"] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
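# --- Usage sketch (added for illustration; "data/train.jsonl" is a
# hypothetical path and the tokenizer download needs network access) ----------
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
    dataset = JsonlDataset(
        "data/train.jsonl" ,        # one JSON object per line: text, img, label
        tokenizer ,
        get_image_transforms() ,
        get_mmimdb_labels() ,
        max_seq_length=80 ,
    )
    loader = DataLoader(dataset , batch_size=8 , collate_fn=collate_fn )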
| 111
| 1
|
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( nums ):
    """simple docstring"""
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels , f1_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc( ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,)
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute( self ,predictions ,references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references ,predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions ,references ,f1_avg="""macro""" )
        elif self.config_name == "record":
            dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(dataset ,predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions ,references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions ,references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 37
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 5_1_2,
"xlm-roberta-large": 5_1_2,
"xlm-roberta-large-finetuned-conll02-dutch": 5_1_2,
"xlm-roberta-large-finetuned-conll02-spanish": 5_1_2,
"xlm-roberta-large-finetuned-conll03-english": 5_1_2,
"xlm-roberta-large-finetuned-conll03-german": 5_1_2,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
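# --- Usage sketch (added for illustration; "sentencepiece.bpe.model" is a
# hypothetical local path to an XLM-R SentencePiece model file) ----------------
if __name__ == "__main__":
    tokenizer = XLMRobertaTokenizer(vocab_file="sentencepiece.bpe.model" )
    tokens = tokenizer._tokenize("Hello world" )
    ids = [tokenizer._convert_token_to_id(token ) for token in tokens]
    print(tokenizer.build_inputs_with_special_tokens(ids ) )  # [<s>] ids [</s>]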
| 355
|
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector( end_pointa: Pointad , end_pointb: Pointad ) -> Vectorad:
    '''simple docstring'''
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross( ab: Vectorad , ac: Vectorad ) -> Vectorad:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector( vector: Vectorad , accuracy: int ) -> bool:
    '''simple docstring'''
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear( a: Pointad , b: Pointad , c: Pointad , accuracy: int = 10 ) -> bool:
    '''simple docstring'''
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
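# --- Usage sketch (added for illustration) ------------------------------------
# Three points on the x-axis are collinear; lifting one point off the line
# produces a non-zero cross product, so the check fails.
if __name__ == "__main__":
    assert are_collinear((0.0, 0.0, 0.0) , (1.0, 0.0, 0.0) , (2.0, 0.0, 0.0) )
    assert not are_collinear((0.0, 0.0, 0.0) , (1.0, 0.0, 0.0) , (1.0, 1.0, 0.0) )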
| 159
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 330
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
a_ = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
a_ = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
a_ = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
a_ = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
a_ = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key( k ,patterns ):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name ,hf_name )
    return k
def convert_bigbird_pegasus( tf_weights: dict ,config_update: dict ) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping["model.encoder.embed_positions.weight"] = mapping['''model.embed_positions.weight''']
    mapping["model.decoder.embed_positions.weight"] = mapping.pop('''model.embed_positions.weight''' )
    missing, extra = torch_model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy( path ) -> dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars ,desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path: str ,save_dir: str ,config_update: dict ):
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
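# --- Usage sketch (added for illustration; paths below are hypothetical) ------
# From the shell:
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path /path/to/tf_checkpoint --save_dir ./bigbird-pegasus
# or programmatically, mirroring what the __main__ block does:
#     tf_weights = get_tf_weights_as_numpy("/path/to/tf_checkpoint")
#     model = convert_bigbird_pegasus(tf_weights, config_update={})
#     model.save_pretrained("./bigbird-pegasus")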
| 330
| 1
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
A__ : Optional[int] = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
A__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
A__ : Optional[Any] = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
A__ : Tuple = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
A__ : Optional[int] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
A__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
A__ : List[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
A__ : List[Any] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
A__ : Union[str, Any] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
A__ : str = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
A__ : Any = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
A__ : List[Any] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
A__ : Optional[int] = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
A__ : str = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
A__ : List[str] = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith('''CompVis'''):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([1_0] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :3_0], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 0
|
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix ,row: int ,column: int ,n: int ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location( grid: Matrix ) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku( grid: Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 ,10 ):
        if is_safe(grid ,row ,column ,digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid: Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell ,end=' ' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 0
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = 'lxmert'
    attribute_map = {}
    def __init__(self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.6_7 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
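# --- Usage sketch (added for illustration) ------------------------------------
# With the defaults above, `num_hidden_layers` is a dict keyed by encoder:
# 9 language layers, 5 cross-modality layers, and 5 visual layers.
if __name__ == "__main__":
    config = LxmertConfig()
    print(config.num_hidden_layers )  # {'vision': 5, 'cross_encoder': 5, 'language': 9}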
| 24
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['global_attention_mask'] = tf.zeros_like(inputs_dict["attention_mask"] )
        num_global_attn_indices = 2
        inputs_dict['global_attention_mask'] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['use_cache'] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def test_saved_model_creation( self ):
        pass
    def test_generate_with_headmasking( self ):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor( tok_lst ):
    '''simple docstring'''
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head( self ):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 )
    def test_inference_with_head( self ):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 , rtol=1e-3 )
| 281
| 0
|
import functools
def mincost_tickets( days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming( index ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
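# --- Usage sketch (added for illustration) ------------------------------------
# The classic LeetCode 983 instance: travelling on days [1, 4, 6, 7, 8, 20]
# with pass costs [2, 7, 15] (1-day, 7-day, 30-day) has minimum total cost 11.
if __name__ == "__main__":
    print(mincost_tickets([1, 4, 6, 7, 8, 20] , [2, 7, 15] ))  # 11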
| 224
|
def binomial_coefficient( n: int , k: int ) -> int:
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count: int ) -> int:
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n: int ) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count: int ) -> int:
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 224
| 1
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"
    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f'{prefix}.{param_name}']
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f'{prefix}.embeddings.{w}.weight'
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f'{prefix}.embeddings.LayerNorm.{w}'
            compressed_sd[param_name] = state_dict[param_name]
    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:  # copy every other teacher layer into the student
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'{prefix}.h.{std_idx}.{layer}.{w}'] = state_dict[
                        f'{prefix}.h.{teacher_idx}.{layer}.{w}'
                    ]
            compressed_sd[f'{prefix}.h.{std_idx}.attn.bias'] = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'{prefix}.encoder.layer.{std_idx}.{layer}.{w}'] = state_dict[
                        f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f'lm_head.dense.{w}'] = state_dict[f'lm_head.dense.{w}']
                compressed_sd[f'lm_head.layer_norm.{w}'] = state_dict[f'lm_head.layer_norm.{w}']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f'{prefix}.ln_f.{w}'] = state_dict[f'{prefix}.ln_f.{w}']
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
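    # Hypothetical invocation (paths and file name are illustrative only):
    #   python extract.py --model_type roberta --model_name roberta-large \
    #       --dump_checkpoint serialization_dir/roberta_6L.pth --vocab_transform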
| 111
|
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
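# Worked example: solution(5) == 4, since 5 pence can be made as 5, 2+2+1,
# 2+1+1+1, or 1+1+1+1+1.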
if __name__ == "__main__":
assert solution(200) == 73_682
| 111
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class lowercase_ ( lowercase ):
'''simple docstring'''
    sample: torch.FloatTensor
class lowercase_ ( nn.Module ):
'''simple docstring'''
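    # A sketch of the layout (read from the code, not upstream docs): a conv stem,
    # a stack of down blocks, a mid block, then GroupNorm + SiLU and a final conv
    # emitting 2 * out_channels when double_z is set (mean and logvar of the latent).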
def __init__( self : str , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : int=("DownEncoderBlock2D",) , __UpperCAmelCase : Tuple=(64,) , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Optional[Any]="silu" , __UpperCAmelCase : Union[str, Any]=True , ) ->str:
"""simple docstring"""
super().__init__()
a = layers_per_block
        a = torch.nn.Conv2d(
__UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
a = None
a = nn.ModuleList([] )
# down
a = block_out_channels[0]
for i, down_block_type in enumerate(__UpperCAmelCase ):
a = output_channel
a = block_out_channels[i]
a = i == len(__UpperCAmelCase ) - 1
a = get_down_block(
__UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
self.down_blocks.append(__UpperCAmelCase )
# mid
        a = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# out
a = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1e-6 )
a = nn.SiLU()
a = 2 * out_channels if double_z else out_channels
        a = nn.Conv2d(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 )
a = False
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str ) ->Tuple:
"""simple docstring"""
a = x
a = self.conv_in(__UpperCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase : Union[str, Any] ):
def custom_forward(*__UpperCAmelCase : str ):
return module(*__UpperCAmelCase )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
for down_block in self.down_blocks:
a = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase )
# middle
a = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase )
else:
# down
for down_block in self.down_blocks:
a = down_block(__UpperCAmelCase )
# middle
a = self.mid_block(__UpperCAmelCase )
# post-process
a = self.conv_norm_out(__UpperCAmelCase )
a = self.conv_act(__UpperCAmelCase )
a = self.conv_out(__UpperCAmelCase )
return sample
class lowercase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Optional[int]=("UpDecoderBlock2D",) , __UpperCAmelCase : List[Any]=(64,) , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : List[Any]=32 , __UpperCAmelCase : Any="silu" , __UpperCAmelCase : Union[str, Any]="group" , ) ->List[str]:
"""simple docstring"""
super().__init__()
a = layers_per_block
        a = nn.Conv2d(
__UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
a = None
a = nn.ModuleList([] )
a = in_channels if norm_type == '''spatial''' else None
# mid
        a = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# up
a = list(reversed(__UpperCAmelCase ) )
a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__UpperCAmelCase ):
a = output_channel
a = reversed_block_out_channels[i]
a = i == len(__UpperCAmelCase ) - 1
a = get_up_block(
__UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , )
self.up_blocks.append(__UpperCAmelCase )
a = output_channel
# out
if norm_type == "spatial":
a = SpatialNorm(block_out_channels[0] , __UpperCAmelCase )
else:
a = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1e-6 )
a = nn.SiLU()
        a = nn.Conv2d(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 )
a = False
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple=None ) ->Optional[Any]:
"""simple docstring"""
a = z
a = self.conv_in(__UpperCAmelCase )
a = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase : str ):
def custom_forward(*__UpperCAmelCase : Optional[int] ):
return module(*__UpperCAmelCase )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
a = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
# middle
a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase )
a = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
a = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
else:
# middle
a = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
a = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
a = up_block(__UpperCAmelCase , __UpperCAmelCase )
# post-process
if latent_embeds is None:
a = self.conv_norm_out(__UpperCAmelCase )
else:
a = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase )
a = self.conv_act(__UpperCAmelCase )
a = self.conv_out(__UpperCAmelCase )
return sample
class lowercase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]="random" , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Optional[int]=True ) ->Optional[Any]:
"""simple docstring"""
super().__init__()
a = n_e
a = vq_embed_dim
a = beta
a = legacy
a = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
a = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
a = self.used.shape[0]
a = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
a = self.re_embed
a = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
a = n_e
a = sane_index_shape
def __lowerCAmelCase ( self : str , __UpperCAmelCase : int ) ->Tuple:
"""simple docstring"""
a = inds.shape
assert len(__UpperCAmelCase ) > 1
a = inds.reshape(ishape[0] , -1 )
a = self.used.to(__UpperCAmelCase )
a = (inds[:, :, None] == used[None, None, ...]).long()
a = match.argmax(-1 )
a = match.sum(2 ) < 1
if self.unknown_index == "random":
a = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
a = self.unknown_index
return new.reshape(__UpperCAmelCase )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a = inds.shape
assert len(__UpperCAmelCase ) > 1
a = inds.reshape(ishape[0] , -1 )
a = self.used.to(__UpperCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
a = 0 # simply set to zero
a = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCAmelCase )
return back.reshape(__UpperCAmelCase )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Any ) ->Any:
"""simple docstring"""
a = z.permute(0 , 2 , 3 , 1 ).contiguous()
a = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
a = torch.argmin(torch.cdist(__UpperCAmelCase , self.embedding.weight ) , dim=1 )
a = self.embedding(__UpperCAmelCase ).view(z.shape )
a = None
a = None
# compute loss for embedding
if not self.legacy:
a = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
a = z + (z_q - z).detach()
# reshape back to match original input shape
a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a = self.remap_to_used(__UpperCAmelCase )
a = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) ->List[str]:
"""simple docstring"""
if self.remap is not None:
a = indices.reshape(shape[0] , -1 ) # add batch axis
a = self.unmap_to_all(__UpperCAmelCase )
a = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a = self.embedding(__UpperCAmelCase )
if shape is not None:
a = z_q.view(__UpperCAmelCase )
# reshape back to match original input shape
a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[str]=False ) ->Optional[int]:
"""simple docstring"""
a = parameters
a , a = torch.chunk(__UpperCAmelCase , 2 , dim=1 )
a = torch.clamp(self.logvar , -30.0 , 20.0 )
a = deterministic
a = torch.exp(0.5 * self.logvar )
a = torch.exp(self.logvar )
if self.deterministic:
a = a = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[torch.Generator] = None ) ->torch.FloatTensor:
"""simple docstring"""
a = randn_tensor(
self.mean.shape , generator=__UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
a = self.mean + self.std * sample
return x
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Any=None ) ->int:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple=[1, 2, 3] ) ->List[Any]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
a = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return self.mean
| 352
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
super().setUp()
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = '''<s>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1_002 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = '''Hello World!'''
a = [18_536, 2_260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = ''' '''.join(__UpperCAmelCase )
a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 26
| 0
|
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> Optional[int]:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def _SCREAMING_SNAKE_CASE (A , A , A , A , A=True ) -> Any:
"""simple docstring"""
model.train()
lowercase__ = model(A )
lowercase__ = F.mse_loss(A , target.to(output.device ) )
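    # When do_backward is False the loss is scaled down manually to mimic the
    # division accelerator.backward() applies during gradient accumulation.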
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(A )
def _SCREAMING_SNAKE_CASE (A , A=False ) -> Optional[int]:
"""simple docstring"""
set_seed(42 )
lowercase__ = RegressionModel()
lowercase__ = deepcopy(A )
lowercase__ = RegressionDataset(length=80 )
lowercase__ = DataLoader(A , batch_size=16 )
model.to(accelerator.device )
if sched:
lowercase__ = AdamW(params=model.parameters() , lr=1E-3 )
lowercase__ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
lowercase__ = LambdaLR(A , lr_lambda=lambda A : epoch**0.65 )
lowercase__ = LambdaLR(A , lr_lambda=lambda A : epoch**0.65 )
# Make a copy of `model`
if sched:
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = accelerator.prepare(A , A , A , A )
else:
lowercase__ ,lowercase__ = accelerator.prepare(A , A )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A )
# Use a single batch
lowercase__ ,lowercase__ = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) )
lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A , A , A , A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A , A , A , A )
else:
# Sync grads
step_model(A , A , A , A )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(A , A , A , A )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase__ = ddp_input[torch.randperm(len(A ) )]
def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]:
"""simple docstring"""
lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A )
# Use a single batch
lowercase__ ,lowercase__ = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) )
lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A , A , A , A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A , A , A , A )
else:
# Sync grads
step_model(A , A , A , A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase__ = ddp_input[torch.randperm(len(A ) )]
def _SCREAMING_SNAKE_CASE (A=False , A=False ) -> Tuple:
"""simple docstring"""
lowercase__ = Accelerator(
split_batches=A , dispatch_batches=A , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A )
for iteration, batch in enumerate(A ):
lowercase__ ,lowercase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) )
lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A , A , A , A , A )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(A ):
step_model(A , A , A , A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(A ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase__ = ddp_input[torch.randperm(len(A ) )]
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE (A=False , A=False ) -> str:
"""simple docstring"""
lowercase__ = Accelerator(
split_batches=A , dispatch_batches=A , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A , A )
for iteration, batch in enumerate(A ):
lowercase__ ,lowercase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) )
lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(A , A , A , A , A )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(A )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(A ):
step_model(A , A , A , A )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
lowercase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(A ))
if accelerator.num_processes > 1:
check_model_parameters(A , A , A , A )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE () -> str:
"""simple docstring"""
lowercase__ = Accelerator()
lowercase__ = RegressionDataset(length=80 )
lowercase__ = DataLoader(A , batch_size=16 )
lowercase__ = RegressionDataset(length=96 )
lowercase__ = DataLoader(A , batch_size=16 )
lowercase__ ,lowercase__ = accelerator.prepare(A , A )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if iteration < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if batch_num < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _SCREAMING_SNAKE_CASE () -> Union[str, Any]:
"""simple docstring"""
lowercase__ = Accelerator()
lowercase__ = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(A )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(A )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(A , A )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(A , A )
def _SCREAMING_SNAKE_CASE (A ) -> List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 2
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 159
| 0
|
"""simple docstring"""
import torch
from torch import nn
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 , lowerCAmelCase__=False):
super().__init__()
__SCREAMING_SNAKE_CASE = n_token
__SCREAMING_SNAKE_CASE = d_embed
__SCREAMING_SNAKE_CASE = d_proj
__SCREAMING_SNAKE_CASE = cutoffs + [n_token]
__SCREAMING_SNAKE_CASE = [0] + self.cutoffs
__SCREAMING_SNAKE_CASE = div_val
__SCREAMING_SNAKE_CASE = self.cutoffs[0]
__SCREAMING_SNAKE_CASE = len(self.cutoffs) - 1
__SCREAMING_SNAKE_CASE = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(self.n_clusters))
__SCREAMING_SNAKE_CASE = nn.ModuleList()
__SCREAMING_SNAKE_CASE = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase__ , lowerCAmelCase__)))
else:
self.out_projs.append(lowerCAmelCase__)
self.out_layers.append(nn.Linear(lowerCAmelCase__ , lowerCAmelCase__))
else:
for i in range(len(self.cutoffs)):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase__ , lowerCAmelCase__)))
self.out_layers.append(nn.Linear(lowerCAmelCase__ , r_idx - l_idx))
__SCREAMING_SNAKE_CASE = keep_order
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
if proj is None:
__SCREAMING_SNAKE_CASE = nn.functional.linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__SCREAMING_SNAKE_CASE = nn.functional.linear(lowerCAmelCase__ , proj.t().contiguous())
__SCREAMING_SNAKE_CASE = nn.functional.linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=False):
if labels is not None:
# Shift so that tokens < n predict n
__SCREAMING_SNAKE_CASE = hidden[..., :-1, :].contiguous()
__SCREAMING_SNAKE_CASE = labels[..., 1:].contiguous()
__SCREAMING_SNAKE_CASE = hidden.view(-1 , hidden.size(-1))
__SCREAMING_SNAKE_CASE = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
__SCREAMING_SNAKE_CASE = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
__SCREAMING_SNAKE_CASE = self._compute_logit(lowerCAmelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
__SCREAMING_SNAKE_CASE = labels != -1_0_0
__SCREAMING_SNAKE_CASE = torch.zeros_like(lowerCAmelCase__ , dtype=hidden.dtype , device=hidden.device)
__SCREAMING_SNAKE_CASE = (
-nn.functional.log_softmax(lowerCAmelCase__ , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1)
else:
# construct weights and biases
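            # Adaptive softmax: the head holds the most frequent tokens plus one
            # "cluster token" per tail; a tail token's log-prob is its cluster
            # token's head log-prob plus its log-prob within the tail softmax.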
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE = self.out_layers[0].weight[l_idx:r_idx]
__SCREAMING_SNAKE_CASE = self.out_layers[0].bias[l_idx:r_idx]
else:
__SCREAMING_SNAKE_CASE = self.out_layers[i].weight
__SCREAMING_SNAKE_CASE = self.out_layers[i].bias
if i == 0:
__SCREAMING_SNAKE_CASE = torch.cat([weight_i, self.cluster_weight] , dim=0)
__SCREAMING_SNAKE_CASE = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase__)
biases.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = weights[0], biases[0], self.out_projs[0]
__SCREAMING_SNAKE_CASE = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCAmelCase__ , dim=1)
if labels is None:
__SCREAMING_SNAKE_CASE = hidden.new_empty((head_logit.size(0), self.n_token))
else:
__SCREAMING_SNAKE_CASE = torch.zeros_like(lowerCAmelCase__ , dtype=hidden.dtype , device=hidden.device)
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = [0] + self.cutoffs
for i in range(len(lowerCAmelCase__) - 1):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__SCREAMING_SNAKE_CASE = (labels >= l_idx) & (labels < r_idx)
__SCREAMING_SNAKE_CASE = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__SCREAMING_SNAKE_CASE = labels.index_select(0 , lowerCAmelCase__) - l_idx
__SCREAMING_SNAKE_CASE = head_logprob.index_select(0 , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = hidden.index_select(0 , lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = hidden
if i == 0:
if labels is not None:
__SCREAMING_SNAKE_CASE = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
__SCREAMING_SNAKE_CASE = head_logprob[:, : self.cutoffs[0]]
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = weights[i], biases[i], self.out_projs[i]
__SCREAMING_SNAKE_CASE = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCAmelCase__ , dim=1)
__SCREAMING_SNAKE_CASE = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__SCREAMING_SNAKE_CASE = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
__SCREAMING_SNAKE_CASE = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__SCREAMING_SNAKE_CASE = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase__ , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def snake_case_ ( self , lowerCAmelCase__):
if self.n_clusters == 0:
__SCREAMING_SNAKE_CASE = self._compute_logit(lowerCAmelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(lowerCAmelCase__ , dim=-1)
else:
# construct weights and biases
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__SCREAMING_SNAKE_CASE = self.out_layers[0].weight[l_idx:r_idx]
__SCREAMING_SNAKE_CASE = self.out_layers[0].bias[l_idx:r_idx]
else:
__SCREAMING_SNAKE_CASE = self.out_layers[i].weight
__SCREAMING_SNAKE_CASE = self.out_layers[i].bias
if i == 0:
__SCREAMING_SNAKE_CASE = torch.cat([weight_i, self.cluster_weight] , dim=0)
__SCREAMING_SNAKE_CASE = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase__)
biases.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = weights[0], biases[0], self.out_projs[0]
__SCREAMING_SNAKE_CASE = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = hidden.new_empty((head_logit.size(0), self.n_token))
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCAmelCase__ , dim=1)
__SCREAMING_SNAKE_CASE = [0] + self.cutoffs
for i in range(len(lowerCAmelCase__) - 1):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__SCREAMING_SNAKE_CASE = head_logprob[:, : self.cutoffs[0]]
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = weights[i], biases[i], self.out_projs[i]
__SCREAMING_SNAKE_CASE = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(lowerCAmelCase__ , dim=1)
__SCREAMING_SNAKE_CASE = head_logprob[:, -i] + tail_logprob_i
__SCREAMING_SNAKE_CASE = logprob_i
return out
| 255
|
"""simple docstring"""
import inspect
import unittest
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def snake_case_ ( self):
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__SCREAMING_SNAKE_CASE = """k-diffusion"""
elif backend == "invisible_watermark":
__SCREAMING_SNAKE_CASE = """invisible-watermark"""
assert backend in deps, f"{backend} is not in the deps table!"
| 255
| 1
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
UpperCAmelCase__ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase__ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase__ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase__ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase__ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase__ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase__ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase__ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase__ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase__ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase__ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase__ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase__ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase__ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase__ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
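# The loop below loads each Google/CompVis diffusion checkpoint from a local
# directory, runs a fixed noise batch through it, and checks the first 30 output
# values against the reference tensors above.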
UpperCAmelCase__ = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase__ = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
UpperCAmelCase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
UpperCAmelCase__ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 0
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the tentative assignment (backtrack)
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
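# Note: sudoku() mutates the grid in place, trying digits 1-9 in each empty
# cell and resetting the cell to 0 (backtracking) when no digit leads on to a
# solution.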
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0
| 1
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 163
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = f"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
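# Example: manhattan_distance([1, 1], [2, 2]) == 2.0, i.e. |1 - 2| + |1 - 2|.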
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163
| 1
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    """simple docstring"""
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        # Return the index of the weight vector closest to the sample
        # (squared Euclidean distance).
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 < d1 else 1
    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        # Nudge the winning vector j toward the sample by learning rate alpha
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}" )
    print(f"Weights that have been trained : {weights}" )
# running the main() function
if __name__ == "__main__":
main()
| 224
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
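# Each try/except below adds a framework's classes to the import structure only
# when the matching optional dependency (sentencepiece, tokenizers, torch, tf,
# flax) is installed; missing backends are silently skipped.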
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mbart"""] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mbart_fast"""] = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mbart"""] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mbart"""] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mbart"""] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
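# For reference, a minimal sketch of the lazy-import pattern used above,
# written against the standard library only (a hypothetical stand-in for
# the actual `_LazyModule` implementation, not a copy of it):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # Map each exported attribute to the submodule that defines it.
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             # Import the owning submodule only on first attribute access.
#             submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
#             value = getattr(submodule, attr)
#             setattr(self, attr, value)  # cache so later lookups skip __getattr__
#             return value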
| 224
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        # Two-phase decode: run all tokens but the last to fill the KV cache,
        # then feed the final token alone against that cache and check the
        # logits match a single full-length forward pass.
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''')
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 161
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BlenderbotTokenizer
def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Any=None , a_ : int=None , a_ : str="replace" , a_ : Tuple="<s>" , a_ : Optional[int]="</s>" , a_ : Union[str, Any]="</s>" , a_ : Union[str, Any]="<s>" , a_ : Optional[Any]="<unk>" , a_ : str="<pad>" , a_ : List[Any]="<mask>" , a_ : Tuple=False , a_ : Dict=True , **a_ : str , ):
super().__init__(
a_ , a_ , tokenizer_file=a_ , errors=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , trim_offsets=a_ , **a_ , )
lowerCAmelCase_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowerCAmelCase_ : str = getattr(a_ , pre_tok_state.pop("type" ) )
lowerCAmelCase_ : int = add_prefix_space
lowerCAmelCase_ : List[Any] = pre_tok_class(**a_ )
lowerCAmelCase_ : Any = add_prefix_space
lowerCAmelCase_ : str = "post_processor"
lowerCAmelCase_ : str = getattr(self.backend_tokenizer , a_ , a_ )
if tokenizer_component_instance:
lowerCAmelCase_ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ : Dict = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase_ : Optional[int] = tuple(state["cls"] )
lowerCAmelCase_ : Optional[int] = False
if state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowerCAmelCase_ : List[str] = add_prefix_space
lowerCAmelCase_ : Any = True
if state.get("trim_offsets" , a_ ) != trim_offsets:
lowerCAmelCase_ : int = trim_offsets
lowerCAmelCase_ : List[str] = True
if changes_to_apply:
lowerCAmelCase_ : Optional[Any] = getattr(a_ , state.pop("type" ) )
lowerCAmelCase_ : Tuple = component_class(**a_ )
setattr(self.backend_tokenizer , a_ , a_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase ( self : int ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase ( self : int , a_ : List[Any] ):
lowerCAmelCase_ : Optional[Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else value
lowerCAmelCase_ : Tuple = value
def lowerCamelCase ( self : int , *a_ : List[str] , **a_ : Optional[int] ):
lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_ , **a_ )
def lowerCamelCase ( self : str , *a_ : Union[str, Any] , **a_ : List[str] ):
lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_ , **a_ )
def lowerCamelCase ( self : int , a_ : str , a_ : Optional[str] = None ):
lowerCAmelCase_ : str = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
def lowerCamelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None ):
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase ( self : Union[str, Any] , a_ : "Conversation" ):
lowerCAmelCase_ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(a_ )
lowerCAmelCase_ : Tuple = " ".join(a_ )
lowerCAmelCase_ : Any = self.encode(a_ )
if len(a_ ) > self.model_max_length:
lowerCAmelCase_ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 161
| 1
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
                F'''has to be one of: { ", ".join(log_levels.keys()) }''')
    return _default_log_level
def _get_library_name():
    return __name__.split('.')[0]
def _get_library_root_logger():
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger():
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger():
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name=None):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity():
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s')
        handler.setFormatter(formatter)
def reset_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS', False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
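# Usage sketch for the helpers above:
#     set_verbosity_info()
#     logger = get_logger(__name__)
#     logger.info("now visible at INFO level")
#     logger.warning_once("emitted only once per unique message")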
| 277
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137
def haversine_distance(lat_a, lon_a, lat_b, lon_b) -> float:
    """Great-circle distance between two lat/lon points, in meters."""
    # Account for the Earth's flattening when converting latitudes.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_a = atan((1 - flattening) * tan(radians(lat_a)))
    phi_b = atan((1 - flattening) * tan(radians(lat_b)))
    lambda_a = radians(lon_a)
    lambda_b = radians(lon_b)
    # Equation
    sin_sq_phi = sin((phi_b - phi_a) / 2)
    sin_sq_lambda = sin((lambda_b - lambda_a) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_a) * cos(phi_b) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
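# Usage sketch (coordinates are illustrative):
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     NEW_YORK = (40.713019, -74.012647)
#     haversine_distance(*SAN_FRANCISCO, *NEW_YORK)  # roughly 4.1e6 meters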
| 26
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ))
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ), )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
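# Hypothetical invocation of the launcher above (script name assumed):
#     python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
# The launcher imports my_training_script as a module, patches sys.argv, and
# spawns the module's `_mp_fn` across the requested number of TPU processes.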
| 359
|
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_00_00
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 1_00_00
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    # Each iteration moves to the best non-tabu neighbor (a 2-swap of the
    # current tour), records the swapped pair in the tabu list, and evicts
    # the oldest entry once the list exceeds `size`.
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
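# Hypothetical input format for --File: one edge per line as
# "node_a node_b distance", e.g.
#     a b 20
#     a c 18
#     b c 10
# The start node is read as the first character of the file.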
| 36
| 0
|
"""simple docstring"""
def miller_rabin(n, allow_probable=False) -> bool:
    """Deterministic Miller-Rabin test below the last bound; probabilistic above it."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.')
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    else:
        # n exceeds every deterministic bound; fall back to all witnesses
        plist = primes
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
'''simple docstring'''
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
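# Usage sketch:
#     miller_rabin(563)      # True  (prime, deterministic range)
#     miller_rabin(838201)   # False (composite)
#     miller_rabin(2**127 - 1, allow_probable=True)  # True (probable prime)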
| 255
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTests(unittest.TestCase):
@slow
    def test_small_integration_test(self) -> None:
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 255
| 1
|
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 320
|
"""simple docstring"""
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC4648."""
    if not isinstance(data, bytes):
        raise TypeError(f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''')
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\'''')
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
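# Round-trip usage sketch:
#     encoded = base64_encode(b"Hello!")  # b'SGVsbG8h'
#     base64_decode(encoded)              # b'Hello!'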
| 320
| 1
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)])
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = GenerationConfig(
do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase , config_name=_lowerCamelCase)
UpperCAmelCase__ : str = GenerationConfig.from_pretrained(_lowerCamelCase , config_name=_lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50)
self.assertEqual(loaded_config.max_length , 20)
self.assertEqual(loaded_config.max_time , _lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained("""gpt2""")
UpperCAmelCase__ : Dict = GenerationConfig.from_model_config(_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_lowerCamelCase , _lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = GenerationConfig()
UpperCAmelCase__ : List[Any] = {
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
UpperCAmelCase__ : List[Any] = copy.deepcopy(_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = generation_config.update(**_lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(_lowerCamelCase , _lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_lowerCamelCase , {"""foo""": """bar"""})
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = GenerationConfig()
UpperCAmelCase__ : Optional[Any] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""") as tmp_dir:
generation_config.save_pretrained(_lowerCamelCase)
UpperCAmelCase__ : Any = GenerationConfig.from_pretrained(_lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""")
UpperCAmelCase__ : int = GenerationConfig.from_model_config(_lowerCamelCase)
assert not hasattr(_lowerCamelCase , """foo""") # no new kwargs should be initialized if from config
def snake_case__ ( self):
UpperCAmelCase__ : int = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , _lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
UpperCAmelCase__ : int = GenerationConfig(
do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , _lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = GenerationConfig.from_pretrained(_lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , _lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
def snake_case__ ( cls):
UpperCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(_lowerCamelCase)
@classmethod
def snake_case__ ( cls):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""")
except HTTPError:
pass
def snake_case__ ( self):
UpperCAmelCase__ : Any = GenerationConfig(
do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token)
UpperCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowerCamelCase , repo_id="""test-generation-config""" , push_to_hub=_lowerCamelCase , use_auth_token=self._token)
UpperCAmelCase__ : Dict = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase))
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = GenerationConfig(
do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token)
UpperCAmelCase__ : Dict = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowerCamelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=_lowerCamelCase , use_auth_token=self._token)
UpperCAmelCase__ : int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase))
| 163
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
if hor == 1_2_8:
UpperCAmelCase__ : int = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCAmelCase__ : Tuple = (3_2, 1_2_8, 2_5_6)
UpperCAmelCase__ : Union[str, Any] = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 3_2:
UpperCAmelCase__ : Dict = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCAmelCase__ : Union[str, Any] = (3_2, 6_4, 1_2_8, 2_5_6)
UpperCAmelCase__ : str = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
UpperCAmelCase__ : Any = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
UpperCAmelCase__ : Tuple = model.state_dict()
UpperCAmelCase__ : Union[str, Any] = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 1_4,
"""out_channels""": 1_4,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5_5_3_6,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
    UpperCAmelCase__ : List[Any] = UNet1DModel(**UpperCamelCase__ )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCAmelCase__ : Optional[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def value_function():
UpperCAmelCase__ : Any = {
"""in_channels""": 1_4,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (3_2, 6_4, 1_2_8, 2_5_6),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5_5_3_6,
"""out_channels""": 1_4,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
UpperCAmelCase__ : Tuple = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
UpperCAmelCase__ : Optional[Any] = model
    UpperCAmelCase__ : Dict = UNet1DModel(**UpperCamelCase__ )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCAmelCase__ : Dict = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase__ : str = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 163
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
'''simple docstring'''
    model_type = '''funnel'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }
def __init__( self , lowercase=3_0_5_2_2 , lowercase=[4, 4, 4] , lowercase=None , lowercase=2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=6_4 , lowercase=3_0_7_2 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=None , lowercase=1E-9 , lowercase="mean" , lowercase="relative_shift" , lowercase=True , lowercase=True , lowercase=True , **lowercase , ):
"""simple docstring"""
A_ : Dict = vocab_size
A_ : Union[str, Any] = block_sizes
A_ : Union[str, Any] = [1] * len(lowercase ) if block_repeats is None else block_repeats
assert len(lowercase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
A_ : Optional[Any] = num_decoder_layers
A_ : Optional[int] = d_model
A_ : Union[str, Any] = n_head
A_ : Tuple = d_head
A_ : Optional[Any] = d_inner
A_ : List[Any] = hidden_act
A_ : List[str] = hidden_dropout
A_ : Optional[Any] = attention_dropout
A_ : str = activation_dropout
A_ : Union[str, Any] = initializer_range
A_ : Optional[int] = initializer_std
A_ : Dict = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
A_ : List[Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
A_ : str = attention_type
A_ : Optional[int] = separate_cls
A_ : List[Any] = truncate_seq
A_ : Dict = pool_q_only
super().__init__(**lowercase )
    @property
    def num_hidden_layers(self):
        """simple docstring"""
        return sum(self.block_sizes)
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        """simple docstring"""
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.')
    @property
    def num_blocks(self):
        """simple docstring"""
        return len(self.block_sizes)
    @num_blocks.setter
    def num_blocks(self, value):
        """simple docstring"""
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
| 192
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DonutImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase )
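# The same deprecation-by-subclassing pattern, sketched generically
# (hypothetical names, not part of this file):
#     class OldFeatureExtractor(NewImageProcessor):
#         def __init__(self, *args, **kwargs):
#             warnings.warn(
#                 "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
#                 FutureWarning,
#             )
#             super().__init__(*args, **kwargs)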
| 192
| 1
|
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 2_7,
"up": 6_5 + ARROW_KEY_FLAG,
"down": 6_6 + ARROW_KEY_FLAG,
"right": 6_7 + ARROW_KEY_FLAG,
"left": 6_8 + ARROW_KEY_FLAG,
"mod_int": 9_1,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 5_0,
"delete": 5_1,
"pg_up": 5_3,
"pg_down": 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6))
                    ch = chr(KEYMAP['esc'])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
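# Illustrative usage sketch (added; not part of the original module). On a real
# terminal, get_character() blocks until a key is pressed and returns either a
# printable character, an arrow-key code offset by ARROW_KEY_FLAG, or
# KEYMAP["undefined"]:
#
#   key = get_character()
#   if isinstance(key, str) and KEYMAP["arrow_begin"] <= ord(key[0]) <= KEYMAP["arrow_end"]:
#       print("arrow key pressed")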
| 161
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Returns the prime factors of ``n`` in ascending order by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
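# Quick sanity check (an added example, not in the original file):
# prime_factors(100) == [2, 2, 5, 5] and prime_factors(97) == [97].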
| 161
| 1
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
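# Hedged usage sketch (added for illustration; these env var names are made up):
#   num_procs = get_int_from_env(["NPROC", "WORLD_SIZE"], default=1)
#   debug = parse_flag_from_env("MYLIB_DEBUG", default=False)  # accepts "1"/"0", "true"/"false", ...
#   mode = parse_choice_from_env("MYLIB_MIXED_PRECISION", default="no")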
| 296
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
A_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
A_ = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
A_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 296
| 1
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
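# Worked example (added): for an apparent power of 100 VA at power factor 0.8,
# real_power(100, 0.8) == 80.0 W and reactive_power(100, 0.8) == 100 * sqrt(1 - 0.64) ≈ 60.0 VAR.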
| 340
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
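# Hedged usage sketch (added; `vit_config` and `gpt2_config` are placeholder names):
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(vit_config, gpt2_config)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention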
| 36
| 0
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"})

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 135
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation.")
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                    cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 135
| 1
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register a Formatter object under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to instantiate a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'")
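# Hedged usage sketch (added): aliases resolve to their main format type before
# instantiation, so both of these return a NumpyFormatter instance:
#   get_formatter("np")
#   get_formatter("numpy")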
| 320
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255 to map pixel values into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Preprocess an image or batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
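# Hedged usage sketch (added; the checkpoint name is only an example):
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = image_processor(images=pil_image, return_tensors="pt")  # {"pixel_values": (1, 3, 224, 224)}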
| 320
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
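# Illustrative round-trip check (added; assumes torch and einops as imported above).
# decimal_to_bits quantizes a [0, 1] image to 8 bit planes in {-1, 1}, and
# bits_to_decimal inverts it up to that quantization:
#   x = torch.rand(1, 3, 8, 8)
#   x_quantized = (x * 255).int().clamp(0, 255) / 255
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x_quantized, atol=1e-6)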
def ddim_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE, with x_0 clipping scaled by `bit_scale`."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type="epsilon", generator=None, return_dict: bool = True) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE, with x_0 clipping scaled by `bit_scale`."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # Monkey-patch the matching custom step function onto the scheduler instance,
        # binding it so that `self` inside the function refers to the scheduler.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 368
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 161
| 0
|
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Returns True if the string contains every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
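# Example (added for illustration): is_pangram() -> True for the default sentence,
# while is_pangram("hello world") -> False.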
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 192
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo", features=Features({"a": Value("int32")}), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 192
| 1
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 360
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255 to map pixel values into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Preprocess an image or batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
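# Hedged usage sketch (added; the checkpoint name is only an example):
#   processor = BlipImageProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   batch = processor(images=pil_image, return_tensors="np")  # pixel_values resized to 384x384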
| 119
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
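# Usage sketch (editor's addition): the attribute_map declared above lets
# generic code read standard names off a WhisperConfig, e.g.
#
#   config = WhisperConfig(d_model=256, encoder_attention_heads=4)
#   config.hidden_size          # 256, resolved to d_model
#   config.num_attention_heads  # 4, resolved to encoder_attention_heads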
| 296
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count perimeters below `limit` that form exactly one integer right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
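    # Sanity check of Euclid's parametrisation (editor's addition): coprime
    # m > n of opposite parity give a primitive triple with perimeter 2m(m + n);
    # m, n = 2, 1 yields the 3-4-5 triangle with perimeter 12.
    m, n = 2, 1
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert (a, b, c) == (3, 4, 5) and a + b + c == 2 * m * (m + n) == 12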
| 296
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 357
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    readme = lowerCAmelCase_  # model-card text assembled just above (original variable name kept)
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 22
| 0
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Map each element of the vector to the open interval (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    """Approximate GELU activation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
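    # Quick numeric check (editor's addition): sigmoid(0) = 0.5, so the GELU
    # approximation is exactly 0 at the origin.
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))              # ≈ [0.269, 0.5, 0.731]
    print(sigmoid_linear_unit(x))  # ≈ [-0.154, 0.0, 0.846]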
| 135
|
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length with the digits chosen so far."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 135
| 1
|
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
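    # Usage sketch (editor's addition): locate every keyword occurrence at once.
    auto = Automaton(["he", "she", "hers"])
    print(auto.search_in("ahishers"))  # {'she': [3], 'he': [4], 'hers': [4]}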
| 369
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase_ :int = image_processor.size['''shortest_edge''']
else:
lowercase_ :Union[str, Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase_ :List[str] = Compose(
[
Lambda(lambda _a : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_a , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_a ):
lowercase_ :List[Any] = [transforms(_a ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 252
| 0
|
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := pat.search(phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
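    # More examples (editor's addition): optional +91/0/91 prefix, then a
    # 10-digit number starting with 7, 8 or 9.
    print(indian_phone_validator("9876543210"))     # True
    print(indian_phone_validator("+911234567890"))  # False — starts with 1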
| 103
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
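    # Worked example (editor's addition): 360 = 2**3 * 3**2 * 5.
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]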
| 161
| 0
|
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups above."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
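    # Exchange sketch (editor's addition): both parties derive the same secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )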
| 314
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 314
| 1
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 28
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak the fairseq model's weights into our BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 119
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
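    # Spot check (editor's addition): the standard normal pdf peaks at
    # 1 / sqrt(2 * pi) at x = 0.
    print(gaussian(0))  # ≈ 0.3989422804014327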
| 303
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
        self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 1
|
'''simple docstring'''
from __future__ import annotations
def simple_interest( principal : float, daily_interest_rate : float, days_between_payments : float ):
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def compound_interest( principal : float, nominal_annual_interest_rate_percentage : float, number_of_compounding_periods : float, ):
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest( principal : float, nominal_annual_percentage_rate : float, number_of_years : float, ):
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
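    # Hedged usage sketch with the reconstructed names above (figures are
    # illustrative): $10,000 principal, 0.05% daily rate, 30 days.
    print(simple_interest(10_000, 0.0005, 30))    # 150.0
    print(compound_interest(10_000, 0.0005, 30))  # ~151.1
    print(apr_interest(10_000, 0.05, 1))          # ~512.67 (5% APR, daily compounding)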
| 141
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE :int = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[str] = PegasusTokenizer
_lowerCamelCase : int = PegasusTokenizerFast
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[str] = True
def lowercase ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = PegasusTokenizer(snake_case_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ):
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowercase ( self : Union[str, Any] , **snake_case_ : Union[str, Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Tuple , snake_case_ : Any ):
return ("This is a test", "This is a test")
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(snake_case_ ) , 1_1_0_3 )
def lowercase ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Tuple ):
_UpperCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_UpperCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
_UpperCAmelCase = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
_UpperCAmelCase = "To ensure a smooth flow of bank resolutions."
_UpperCAmelCase = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
_UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowercase ( self : int ):
_UpperCAmelCase = ["This is going to be way too long." * 1_5_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
@slow
def lowercase ( self : Dict ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[str] = PegasusTokenizer
_lowerCamelCase : List[Any] = PegasusTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Union[str, Any] = True
def lowercase ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = PegasusTokenizer(snake_case_ , offset=0 , mask_token_sent=snake_case_ , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ):
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowercase ( self : Optional[Any] , **snake_case_ : Dict ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Union[str, Any] , snake_case_ : str ):
return ("This is a test", "This is a test")
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
_UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
_UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=snake_case_ , add_special_tokens=snake_case_ ).input_ids[0]
self.assertListEqual(snake_case_ , snake_case_ )
@require_torch
def lowercase ( self : Tuple ):
_UpperCAmelCase = ["This is going to be way too long." * 1_0_0_0, "short example"]
_UpperCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_UpperCAmelCase = self._large_tokenizer(snake_case_ , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = self._large_tokenizer(
text_target=snake_case_ , max_length=5 , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(snake_case_ ) == 2 # input_ids, attention_mask.
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
_UpperCAmelCase = self._large_tokenizer(snake_case_ ).input_ids
self.assertListEqual(
snake_case_ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 22
| 0
|
'''simple docstring'''
def catalan_number( number : int ) ->int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
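    # Quick check of the recurrence above: catalan_number(n) is the (n-1)-th
    # Catalan number, so the first five calls give 1, 1, 2, 5, 14.
    print([catalan_number(i) for i in range(1, 6)])  # [1, 1, 2, 5, 14]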
| 337
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Optional[Any] ):
A = torch.nn.Linear(10 , 10 )
A = torch.optim.SGD(model.parameters() , 0.1 )
A = Accelerator()
A = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 337
| 1
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a_ : Optional[int] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
a_ : Tuple = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
a_ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
a_ : Tuple = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
a_ : Optional[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=[1, 10, 100] , UpperCamelCase=4 , UpperCamelCase=3.0 ):
"""simple docstring"""
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=UpperCamelCase ) as executor:
lowerCamelCase_ = []
lowerCamelCase_ = Counter()
lowerCamelCase_ = 0
lowerCamelCase_ = defaultdict(UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(UpperCamelCase , UpperCamelCase ) ):
for candidate in candidates:
lowerCamelCase_ = candidate + "\n" + test_case
lowerCamelCase_ = (test_program, timeout, task_id, completion_id[task_id])
lowerCamelCase_ = executor.submit(UpperCamelCase , *UpperCamelCase )
futures.append(UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCamelCase ):
lowerCamelCase_ = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
lowerCamelCase_ ,lowerCamelCase_ = [], []
for result in results.values():
result.sort()
lowerCamelCase_ = [r[1]["passed"] for r in result]
total.append(len(UpperCamelCase ) )
correct.append(sum(UpperCamelCase ) )
lowerCamelCase_ = np.array(UpperCamelCase )
lowerCamelCase_ = np.array(UpperCamelCase )
lowerCamelCase_ = k
lowerCamelCase_ = {f'''pass@{k}''': estimate_pass_at_k(UpperCamelCase , UpperCamelCase , UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ):
def estimator(UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = itertools.repeat(lowerCamelCase__ , len(lowerCamelCase__ ) )
else:
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
lowerCamelCase_ = iter(lowerCamelCase__ )
return np.array([estimator(int(lowerCamelCase__ ) , int(lowerCamelCase__ ) , lowerCamelCase__ ) for n, c in zip(lowerCamelCase__ , lowerCamelCase__ )] )
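# Sanity check for the unbiased estimator above (referenced as
# `estimate_pass_at_k` at its call site): with n=5 samples of which c=2 pass,
# pass@1 must equal c/n, and indeed 1 - (1 - 1/4)(1 - 1/5) = 1 - 0.6 = 0.4.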
| 55
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
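# With the registration above, `from transformers.models.autoformer import
# AutoformerModel` resolves lazily: the torch-dependent module is only
# imported on first attribute access.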
| 252
| 0
|
# Lint as: python3
import itertools
import os
import re
A_ : Dict = re.compile(r'([A-Z]+)([A-Z][a-z])')
A_ : List[str] = re.compile(r'([a-z\d])([A-Z])')
A_ : Optional[int] = re.compile(r'(?<!_)_(?!_)')
A_ : Tuple = re.compile(r'(_{2,})')
A_ : Tuple = r'^\w+(\.\w+)*$'
A_ : List[str] = r'<>:/\|?*'
def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(R'\1_\2' , name )
    name = _lowercase_uppercase_re.sub(R'\1_\2' , name )
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(F'''Split name should match '{_split_re}' but got '{split}'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split(path: str, dataset_name: str, split: str, filetype_suffix=None) -> str:
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(path , prefix )
    return F'''{filepath}*'''
def filenames_for_dataset_split(path: str, dataset_name: str, split: str, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
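# Hedged usage sketch of the reconstructed helpers above: shard filenames for
# a split written as two 100-example Arrow shards.
# filenames_for_dataset_split("/data", "MyDataset", "train", "arrow", [100, 100])
# -> ['/data/my_dataset-train-00000-of-00002.arrow',
#     '/data/my_dataset-train-00001-of-00002.arrow']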
| 361
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def snake_case (UpperCAmelCase__ ) -> List[str]:
UpperCamelCase_: int = [False] * len(UpperCAmelCase__ )
UpperCamelCase_: Any = [-1] * len(UpperCAmelCase__ )
def dfs(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase_: Tuple = True
UpperCamelCase_: Optional[int] = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCAmelCase__ , 1 - c )
for i in range(len(UpperCAmelCase__ ) ):
if not visited[i]:
dfs(UpperCAmelCase__ , 0 )
for i in range(len(UpperCAmelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
A_ : Dict = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
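# For contrast, an odd cycle is never bipartite: a triangle such as
# {0: [1, 2], 1: [0, 2], 2: [0, 1]} makes check_bipartite_dfs return False,
# since any DFS 2-coloring must give two adjacent vertices the same color.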
| 292
| 0
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-1'''
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-2'''
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-3'''
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
class UpperCamelCase__ ( a__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A = True , ) -> str:
        super().__init__()
SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _UpperCamelCase ( self ) -> Dict[str, Any]:
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def _UpperCamelCase ( self , _A = "auto" ) -> Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def _UpperCamelCase ( self ) -> List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def _UpperCamelCase ( self , _A , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ) -> str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self , _A , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ) -> Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self , _A , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ) -> Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self , _A , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ) -> Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self , _A , _A = 512 , _A = 512 , _A = 50 , _A = 7.5 , _A = None , _A = 1 , _A = 0.0 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , **_A , ) -> Dict:
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE_ = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE_ = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE_ = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE_ = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 299
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 313
| 0
|
import argparse
import os
import re
UpperCAmelCase__ = 'src/transformers'
# Pattern that looks at the indentation in a line.
UpperCAmelCase__ = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase__ = re.compile(r'^\s*\"([^\"]+)\":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase__ = re.compile(r'^\s*_import_structure\[\"([^\"]+)\"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase__ = re.compile(r'^\s*\"([^\"]+)\",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase__ = re.compile(r'\[([^\]]+)\]')
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Optional[int]:
_snake_case = _re_indent.search(lowerCAmelCase__ )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Union[str, Any]=None ) -> Optional[Any]:
_snake_case = 0
_snake_case = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase__ ):
index += 1
_snake_case = ['''\n'''.join(lines[:index] )]
else:
_snake_case = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_snake_case = [lines[index]]
index += 1
while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
if index < len(lowerCAmelCase__ ) - 1:
_snake_case = [lines[index + 1]]
index += 1
else:
_snake_case = []
else:
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
_snake_case = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase__ ) > 0:
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( __lowerCamelCase : str ) -> Dict:
def _inner(__lowerCamelCase : List[str] ):
return key(lowerCAmelCase__ ).lower().replace('''_''' , '''''' )
return _inner
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : str=None ) -> int:
# If no key is provided, we use a noop.
    def noop(x):
        return x
if key is None:
_snake_case = noop
# Constants are all uppercase, they go first.
_snake_case = [obj for obj in objects if key(lowerCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_snake_case = [obj for obj in objects if key(lowerCAmelCase__ )[0].isupper() and not key(lowerCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
_snake_case = [obj for obj in objects if not key(lowerCAmelCase__ )[0].isupper()]
_snake_case = ignore_underscore(lowerCAmelCase__ )
return sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ )
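    # Worked example of the ordering implemented above (the function is named
    # `sort_objects` at its call sites): constants first, then classes, then
    # functions, each group sorted with underscores ignored.
    # sort_objects(["foo", "Bar", "_baz", "CONST"]) -> ["CONST", "Bar", "_baz", "foo"]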
def _UpperCAmelCase ( __lowerCamelCase : List[str] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__lowerCamelCase : Union[str, Any] ):
_snake_case = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
_snake_case = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_snake_case = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(lowerCAmelCase__ )] ) + "]"
_snake_case = import_statement.split('''\n''' )
if len(lowerCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_snake_case = 2 if lines[1].strip() == '''[''' else 1
_snake_case = [(i, _re_strip_line.search(lowerCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_snake_case = sort_objects(lowerCAmelCase__ , key=lambda __lowerCamelCase : x[1] )
_snake_case = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_snake_case = _re_bracket_content.sub(_replace , lines[1] )
else:
_snake_case = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_snake_case = keys[:-1]
_snake_case = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(lowerCAmelCase__ )] )
return "\n".join(lowerCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
_snake_case = _re_bracket_content.sub(_replace , lowerCAmelCase__ )
return import_statement
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple=True ) -> str:
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
_snake_case = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_snake_case = split_code_in_indented_blocks(
lowerCAmelCase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_snake_case = main_blocks[block_idx]
_snake_case = block.split('''\n''' )
# Get to the start of the imports.
_snake_case = 0
while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_snake_case = len(lowerCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
_snake_case = '''\n'''.join(block_lines[line_idx:-1] )
_snake_case = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_snake_case = split_code_in_indented_blocks(lowerCAmelCase__ , indent_level=lowerCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_snake_case = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_snake_case = [(pattern.search(lowerCAmelCase__ ).groups()[0] if pattern.search(lowerCAmelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_snake_case = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
_snake_case = [x[0] for x in sorted(lowerCAmelCase__ , key=lambda __lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_snake_case = 0
_snake_case = []
for i in range(len(lowerCAmelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_snake_case = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
_snake_case = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase__ ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCAmelCase__ ) )
def _UpperCAmelCase ( __lowerCamelCase : Optional[int]=True ) -> Optional[int]:
_snake_case = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
_snake_case = sort_imports(os.path.join(lowerCAmelCase__ , '''__init__.py''' ) , check_only=lowerCAmelCase__ )
if result:
_snake_case = [os.path.join(lowerCAmelCase__ , '''__init__.py''' )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(f'''Would overwrite {len(lowerCAmelCase__ )} files, run `make style`.''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
UpperCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 361
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase__ = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase__ = model.state_dict()
UpperCAmelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
UpperCAmelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
UpperCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
UpperCAmelCase__ = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"]
UpperCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 40
| 0
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __UpperCamelCase :
"""simple docstring"""
def UpperCAmelCase__ ( self : Any , _A : Any ):
"""simple docstring"""
raise NotImplementedError()
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[str] , _A : "AutoTokenizer" , _A : bool = False , **_A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer
__SCREAMING_SNAKE_CASE : Any = skip_prompt
__SCREAMING_SNAKE_CASE : Union[str, Any] = decode_kwargs
# variables used in the streaming process
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : str = True
def UpperCAmelCase__ ( self : Optional[int] , _A : Dict ):
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
__SCREAMING_SNAKE_CASE : Any = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
__SCREAMING_SNAKE_CASE : str = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
__SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
__SCREAMING_SNAKE_CASE : Dict = text[self.print_len :]
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : List[str] = 0
# If the last token is a CJK character, we print the characters.
elif len(_A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
__SCREAMING_SNAKE_CASE : Optional[int] = text[self.print_len :]
self.print_len += len(_A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
__SCREAMING_SNAKE_CASE : int = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(_A )
self.on_finalized_text(_A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
if len(self.token_cache ) > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
__SCREAMING_SNAKE_CASE : List[str] = text[self.print_len :]
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Any = 0
else:
__SCREAMING_SNAKE_CASE : Tuple = ''''''
__SCREAMING_SNAKE_CASE : str = True
self.on_finalized_text(_A , stream_end=_A )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : str , _A : bool = False ):
"""simple docstring"""
print(_A , flush=_A , end='''''' if not stream_end else None )
def UpperCAmelCase__ ( self : List[Any] , _A : str ):
"""simple docstring"""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[str] , _A : "AutoTokenizer" , _A : bool = False , _A : Optional[float] = None , **_A : Union[str, Any] ):
"""simple docstring"""
super().__init__(_A , _A , **_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = Queue()
__SCREAMING_SNAKE_CASE : Tuple = None
__SCREAMING_SNAKE_CASE : Optional[Any] = timeout
def UpperCAmelCase__ ( self : Optional[Any] , _A : str , _A : bool = False ):
"""simple docstring"""
self.text_queue.put(_A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : List[Any] ):
"""simple docstring"""
return self
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
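# Standard usage pattern for the iterator streamer above (upstream this class
# is transformers' TextIteratorStreamer; `model`, `tokenizer`, and `inputs`
# are assumed to exist):
# from threading import Thread
# streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
# Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
# for new_text in streamer:
#     print(new_text, end="")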
| 303
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = OmegaConf.load(snake_case )
if display:
print(yaml.dump(OmegaConf.to_container(snake_case ) ) )
return config
def a__ ( snake_case , snake_case=None , snake_case=None ):
"""simple docstring"""
if conf_path is None:
__SCREAMING_SNAKE_CASE : Any = '''./model_checkpoints/vqgan_only.yaml'''
__SCREAMING_SNAKE_CASE : List[str] = load_config(snake_case , display=snake_case )
__SCREAMING_SNAKE_CASE : str = VQModel(**config.model.params )
if ckpt_path is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''./model_checkpoints/vqgan_only.pt'''
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(snake_case , map_location=snake_case )
if ".ckpt" in ckpt_path:
__SCREAMING_SNAKE_CASE : Optional[Any] = sd['''state_dict''']
model.load_state_dict(snake_case , strict=snake_case )
model.to(snake_case )
del sd
return model
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.encode(snake_case )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
__SCREAMING_SNAKE_CASE : Any = model.decode(snake_case )
return xrec
def a__ ( snake_case , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = string.rsplit('''.''' , 1 )
if reload:
__SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module(snake_case )
importlib.reload(snake_case )
return getattr(importlib.import_module(snake_case , package=snake_case ) , cls )
def a__ ( snake_case ):
"""simple docstring"""
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
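# The `target`/`params` convention above instantiates objects from plain
# config dicts, e.g. {"target": "torch.nn.Linear",
# "params": {"in_features": 4, "out_features": 2}} yields
# torch.nn.Linear(in_features=4, out_features=2).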
def a__ ( snake_case , snake_case , snake_case=True , snake_case=True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = instantiate_from_config(snake_case )
if sd is not None:
model.load_state_dict(snake_case )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
# load the specified checkpoint
if ckpt:
__SCREAMING_SNAKE_CASE : Dict = torch.load(snake_case , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : List[Any] = pl_sd['''global_step''']
print(F'''loaded model from global step {global_step}.''' )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''state_dict''': None}
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=snake_case , eval_mode=snake_case )['''model''']
return model, global_step
| 303
| 1
|
from __future__ import annotations
def solution(maze):
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze , 0 , 0 , solutions)
    if solved:
        print('''\n'''.join(str(row) for row in solutions))
    else:
        print('''No solution exists!''')
    return solved
def run_maze(maze , i , j , solutions):
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions)
                or run_maze(maze , i , j + 1 , solutions)
                or run_maze(maze , i - 1 , j , solutions)
                or run_maze(maze , i , j - 1 , solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
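    # Illustrative 3x3 maze for the solver above: 0 = open cell, 1 = wall.
    example_maze = [[0, 1, 0], [0, 0, 0], [1, 0, 0]]
    solution(example_maze)  # prints the 0/1 path matrix and returns True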
| 167
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_A = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args , unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
    #                  stream_logs=True)
| 167
| 1
|
def __lowercase ( number ) ->int:
    """simple docstring"""
    if not isinstance(number, int ):
        msg : str = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg : str = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number : int = 1
    for i in range(1, number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
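# A quick sanity check: the loop implements the Catalan recurrence
# C(n) = C(n-1) * (4n - 2) // (n + 1), so the nth call returns C(n-1).
#
#     >>> [__lowercase(n) for n in range(1, 6)]
#     [1, 1, 2, 5, 14]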
| 337
|
from typing import List
from .keymap import KEYMAP, get_character
def mark ( key ) ->int:
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func, '''handle_key''', [] )
        handle += [key]
        setattr(func, '''handle_key''', handle )
        return func
    return decorator
def mark_multiple ( *keys ) ->Any:
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func, '''handle_key''', [] )
        handle += keys
        setattr(func, '''handle_key''', handle )
        return func
    return decorator
class KeyHandler ( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register ( cls ) ->Any:
    """simple docstring"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
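# A minimal usage sketch (hypothetical menu class): methods decorated with @mark
# end up in Menu.key_handler, and Menu.handle_input() dispatches the next
# keypress to the matching method.
#
#     @register
#     class Menu:
#         @mark("j")
#         def move_down(self):
#             ...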
| 337
| 1
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = SpeechTaTokenizer
__magic_name__ = False
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'this is a test'
_lowerCAmelCase : Dict = 'this is a test'
return input_text, output_text
    def a ( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        '''simple docstring'''
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def a ( self ):
'''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def a ( self ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
        self.assertEqual(len(vocab_keys ) , 81 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def a ( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )
                self.assertNotEqual(vocab_size_2 , 0 )
                self.assertEqual(vocab_size , vocab_size_2 )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_2 , all_size + len(new_toks ) )
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )
                self.assertNotEqual(vocab_size_3 , 0 )
                self.assertEqual(vocab_size , vocab_size_3 )
                self.assertEqual(added_toks_2 , len(new_toks_2 ) )
                self.assertEqual(all_size_3 , all_size_2 + len(new_toks_2 ) )
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test' )
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def a ( self ):
'''simple docstring'''
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=sequences , )
| 25
|
'''simple docstring'''
def solution ():
    """simple docstring"""
    days_per_month : list = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day : int = 6
    month : int = 1
    year : int = 1_9_0_1
    sundays : int = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
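# A quick cross-check sketch using the standard library (not part of the original
# solution): count month-starts that fall on a Sunday in 1901-2000 directly.
#
#     from datetime import date
#     direct = sum(date(y, m, 1).weekday() == 6 for y in range(1901, 2001) for m in range(1, 13))
#     assert direct == solution() == 171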
| 25
| 1
|
import base64
def base85_encode ( string ):
    return base64.a85encode(string.encode('''utf-8''') )
def base85_decode ( a85encoded ):
    return base64.a85decode(a85encoded ).decode('''utf-8''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
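# A minimal round-trip sketch for the Ascii85 helpers above:
#
#     >>> base85_decode(base85_encode("some text")) == "some text"
#     True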
| 49
|
"""simple docstring"""
from __future__ import annotations
solution : list = []
def is_safe ( board , row , column ):
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve ( board , row ):
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard ( board ):
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("Q" , end=" " )
            else:
                print("." , end=" " )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
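# Note: the backtracking search above prints every placement as it is found; for
# n = 8 the classic eight-queens puzzle has 92 distinct solutions, so
# len(solution) should be 92 once solve(board, 0) returns.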
| 292
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : Tuple = "facebook/wmt19-en-de"
snake_case : Any = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : Tuple = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
snake_case : Dict = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 41
|
def euclidean_distance_sqr ( point1 , point2 ) -> float:
    '''simple docstring'''
    return (point2[0] - point1[0]) ** 2 + (point2[1] - point1[1]) ** 2
def column_based_sort ( array , column=0 ) -> list:
    '''simple docstring'''
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair ( points , points_counts , min_dis=float("inf" ) ) -> float:
    '''simple docstring'''
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip ( points , points_counts , min_dis=float("inf" ) ) -> float:
    '''simple docstring'''
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr ( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    '''simple docstring'''
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points ( points , points_counts ) -> float:
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 41
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__A = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__A = json.load(f)
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer (self , mname ) ->FSMTTokenizer:
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(mname)
    def get_model (self , mname ) ->FSMTForConditionalGeneration:
        '''simple docstring'''
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
])
@slow
    def test_bleu_scores (self , pair , min_bleu_score ) ->None:
        '''simple docstring'''
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences , tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score)
| 10
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack ( vl , wt , w , n )-> float:
    '''simple docstring'''
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
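# A minimal usage sketch (hypothetical item set): values, weights, capacity, item
# count. Items are taken greedily by value/weight ratio, splitting the last one.
#
#     >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
#     240.0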
| 40
| 0
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase__ : str = logging.getLogger(__name__)
def load_and_quantize_model ( model: torch.nn.Module , bnb_quantization_config: BnbQuantizationConfig , weights_location: Union[str, os.PathLike] = None , device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None , no_split_module_classes: Optional[List[str]] = None , max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None , offload_folder: Optional[Union[str, os.PathLike]] = None , offload_state_dict: bool = False , ) -> torch.nn.Module:
    """simple docstring"""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace('.weight' , '' ).replace('.bias' , '' )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            'We move the model to cuda.' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map ( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> dict:
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == 'balanced_low_0') , max_memory=max_memory , **kwargs , )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> torch.nn.Module:
    """simple docstring"""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> tuple:
    """simple docstring"""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert ( model ) -> list:
    """simple docstring"""
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , 'base_model_prefix' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers ( model ) -> bool:
    """simple docstring"""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device ( parameter: nn.Module ) -> torch.device:
    """simple docstring"""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit ( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> None:
    """simple docstring"""
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}." )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , 'meta' , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 360
|
"""simple docstring"""
def euclidean_distance_sqr ( point1 , point2 ) -> float:
    """simple docstring"""
    return (point2[0] - point1[0]) ** 2 + (point2[1] - point1[1]) ** 2
def column_based_sort ( array , column=0 ) -> list:
    """simple docstring"""
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair ( points , points_counts , min_dis=float('inf' ) ) -> float:
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip ( points , points_counts , min_dis=float('inf' ) ) -> float:
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr ( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    """simple docstring"""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points ( points , points_counts ) -> float:
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 289
| 0
|
"""simple docstring"""
def knapsack ( weights , values , number_of_items , max_weight , index ):
    """simple docstring"""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
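# A minimal usage sketch (hypothetical items): weights, values, item count,
# remaining capacity, starting index. Taking items 0 and 2 fills weight 5
# exactly for the best value.
#
#     >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
#     13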
| 167
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=2_24 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def a_ ( self : str ):
"""simple docstring"""
pass
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : str = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def a_ ( self : str ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : List[str] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
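# A minimal usage sketch of the processor under test (not part of the test file;
# shapes assume the 18x18 default size from the tester above):
#
#     import numpy as np
#     from transformers import ViTImageProcessor
#     processor = ViTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(np.zeros((32, 32, 3), dtype=np.uint8), return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])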
| 167
| 1
|
"""simple docstring"""
def euclidean_distance_sqr (point1 , point2 ):
    return (point2[0] - point1[0]) ** 2 + (point2[1] - point1[1]) ** 2
def column_based_sort (array , column=0 ):
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair (points , points_counts , min_dis=float('''inf''' ) ):
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip (points , points_counts , min_dis=float('''inf''' ) ):
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr (points_sorted_on_x , points_sorted_on_y , points_counts ):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points (points , points_counts ):
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 68
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    '''simple docstring'''
    def __init__( self ) -> Tuple:
        self.graph = {}
    def add_pair( self , u , v , w=1 ) -> Optional[Any]:
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes( self ) -> Optional[int]:
        return list(self.graph )
    def remove_pair( self , u , v ) -> int:
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self , s=-2 , d=-1 ) -> Optional[int]:
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly( self , c=-1 ) -> List[Any]:
        if c == -1:
            c = floor(random() * 1_0_0_0_0 ) + 1_0
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_0_2 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self , s=-2 ) -> Optional[Any]:
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self , u ) -> Optional[int]:
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self , u ) -> int:
        return len(self.graph[u] )
    def topological_sort( self , s=-2 ) -> str:
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return sorted_nodes
    def cycle_nodes( self ) -> Tuple:
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent )
            parent = s
            s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return list(anticipating_nodes )
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Any = []
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : List[str] = list(self.graph )[0]
stack.append(A )
visited.append(A )
_UpperCAmelCase : int = -2
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[int] = s
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase : Optional[Any] = len(A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase : List[Any] = True
if len(A ) != 0:
_UpperCAmelCase : int = stack[len(A ) - 1]
else:
_UpperCAmelCase : List[str] = False
indirect_parents.append(A )
_UpperCAmelCase : List[Any] = s
_UpperCAmelCase : Any = ss
# check if se have reached the starting point
if len(A ) == 0:
return False
def __lowerCAmelCase ( self , A=-2 , A=-1 ) -> Dict:
_UpperCAmelCase : Tuple = time()
self.dfs(A , A )
_UpperCAmelCase : Optional[int] = time()
return end - begin
def __lowerCAmelCase ( self , A=-2 ) -> Dict:
_UpperCAmelCase : int = time()
self.bfs(A )
_UpperCAmelCase : str = time()
return end - begin
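

# A minimal usage sketch for the directed graph above (added for illustration;
# not part of the original module). All API names match the class as defined.
def _demo_directed_graph():
    g = DirectedGraph()
    g.add_pair("a", "b", w=3)
    g.add_pair("b", "c")
    g.add_pair("c", "a")
    print(g.all_nodes())    # ['a', 'b', 'c']
    print(g.dfs())          # depth-first order starting from the first node
    print(g.bfs())          # breadth-first order starting from the first node
    print(g.in_degree("a"))  # 1, from the edge c -> a
    print(g.has_cycle())    # True for the a -> b -> c -> a cycle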
class Graph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
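

# Companion sketch for the undirected variant (illustrative only, not from the
# original file): every edge added with add_pair is mirrored automatically.
def _demo_undirected_graph():
    g = Graph()
    g.add_pair(1, 2)
    g.add_pair(2, 3, w=5)
    print(g.all_nodes())  # [1, 2, 3]
    print(g.degree(2))    # 2 -- edges to both 1 and 3
    g.remove_pair(1, 2)
    print(g.degree(2))    # 1 after the edge is removed in both directions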
| 68
| 1
|
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """
    Holds a DeepSpeed configuration dictionary and can be quickly queried for
    things like zero stage or offloading.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
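

# A quick illustration (an assumption-laden sketch, not part of the original
# module): HfDeepSpeedConfig accepts a plain dict, a JSON file path, or a
# base64-encoded JSON string, and exposes simple stage/offload queries.
def _demo_hf_deepspeed_config():
    ds_config = {
        "zero_optimization": {
            "stage": 3,
            "offload_param": {"device": "cpu"},
        }
    }
    hf_ds_config = HfDeepSpeedConfig(ds_config)
    assert hf_ds_config.is_zero3()
    assert hf_ds_config.is_offload()  # "cpu" is a valid offload device
    assert hf_ds_config.get_value("zero_optimization.stage") == 3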
| 25
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
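
# Typical invocation (illustrative; the training script and its arguments after
# the launcher are hypothetical):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports the training script as a module and spawns its `_mp_fn`
# once per requested TPU core.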
| 25
| 1
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
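
# Example invocation (illustrative; the script filename and output directory
# below are hypothetical):
#
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations
#
# This reuses every component of the text-to-image checkpoint except the prior,
# swapping in a CLIP vision encoder so the pipeline can start from an image.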
| 129
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output,
    timestep,
    sample,
    eta=0.0,
    use_clipped_model_output=True,
    generator=None,
    return_dict=True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding

    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output,
    timestep,
    sample,
    prediction_type="epsilon",
    generator=None,
    return_dict=True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler with the bit-aware step functions defined above.
        # `__get__` binds the replacement so `self` inside it is the scheduler,
        # which also needs to know the bit scale used for clamping.
        scheduler.bit_scale = bit_scale
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = step_fn.__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
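

# Sanity-check sketch (not part of the original file): the two helpers are
# inverses up to 8-bit quantization of the input image.
def _check_bit_round_trip():
    image = torch.rand(1, 3, 4, 4)     # values in [0, 1]
    bits = decimal_to_bits(image)      # shape (1, 24, 4, 4), values in {-1, 1}
    recovered = bits_to_decimal(bits)  # back to shape (1, 3, 4, 4)
    assert torch.allclose((image * 255).int().float() / 255, recovered)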
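
# Usage sketch (assumes a UNet trained on bit representations; the checkpoint
# path below is hypothetical):
#
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   images = pipe(height=256, width=256, num_inference_steps=50).images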
| 129
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
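

# For reference, the behaviour under test can be reproduced standalone
# (a sketch, using the same tiny checkpoint the tests use):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   print(unmasker("My name is <mask>"))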
| 41
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
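

# Standalone sketch of what these tests exercise (illustrative; assumes the
# public Donut base checkpoint):
#
#   from transformers import DonutImageProcessor
#   from PIL import Image
#   processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
#   pixel_values = processor(Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values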
| 41
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = 4_2
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self , __magic_name__ = 6_55_36 , __magic_name__ = None , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = 0 , __magic_name__ = "fourier" , __magic_name__ = True , __magic_name__ = False , __magic_name__ = 0.0 , __magic_name__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __magic_name__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __magic_name__ = "UNetMidBlock1D" , __magic_name__ = None , __magic_name__ = (32, 32, 64) , __magic_name__ = None , __magic_name__ = 8 , __magic_name__ = 1 , __magic_name__ = False , ) -> Tuple:
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )
    def forward( self , sample , timestep , return_dict = True , ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
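# Minimal shape-check sketch (illustrative, not part of the model file) for the
# timestep broadcasting in `forward` above: a (batch, embed_dim) embedding is
# expanded to (batch, embed_dim, length) so it can be combined channel-wise
# with the 1D signal. The sizes below are made up for the demo.
if __name__ == "__main__":
    _sample = torch.randn(4, 2, 32)        # (batch, channels, length)
    _temb = torch.randn(4, 16)[..., None]  # (batch, embed_dim, 1)
    _temb = _temb.repeat([1, 1, _sample.shape[2]]).to(_sample.dtype)
    assert _temb.shape == (4, 16, 32)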
| 361
|
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedily select a maximum set of non-overlapping activities,
    assuming the activities are already sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
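# Small wrapper (illustrative, not from the original) for inputs that are not
# already sorted by finish time, which the greedy above assumes; note that the
# indices it prints are positions in the sorted order, not the caller's lists.
def print_max_activities_unsorted(start: list[int], finish: list[int]) -> None:
    order = sorted(range(len(finish)), key=lambda k: finish[k])
    print_max_activities([start[k] for k in order], [finish[k] for k in order])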
| 104
| 0
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[str] = CodeGenTokenizer
__UpperCamelCase : int = CodeGenTokenizerFast
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = {'''add_prefix_space''': True}
__UpperCamelCase : str = False
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
SCREAMING_SNAKE_CASE__ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE__ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
SCREAMING_SNAKE_CASE__ : List[str] = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = """lower newer"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer"""
return input_text, output_text
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : int = """lower newer"""
SCREAMING_SNAKE_CASE__ : Dict = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = """lower newer"""
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : str = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
pass
def __magic_name__ (self , SCREAMING_SNAKE_CASE__=15 ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# Simple input
SCREAMING_SNAKE_CASE__ : Any = """This is a simple input"""
SCREAMING_SNAKE_CASE__ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ : Dict = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
SCREAMING_SNAKE_CASE__ : str = """This is a simple input"""
SCREAMING_SNAKE_CASE__ : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
SCREAMING_SNAKE_CASE__ : str = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(*SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = """$$$"""
SCREAMING_SNAKE_CASE__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = """This is a simple input"""
SCREAMING_SNAKE_CASE__ : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
SCREAMING_SNAKE_CASE__ : str = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
SCREAMING_SNAKE_CASE__ : int = """\nif len_a > len_b: result = a\nelse: result = b"""
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(SCREAMING_SNAKE_CASE__ , truncate_before_pattern=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
pass
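# Usage sketch (illustrative) of the `truncate_before_pattern` decoding behaviour
# the slow test above exercises; requires network access to the Hugging Face Hub,
# and the example input/patterns here are made up.
if __name__ == "__main__":
    demo_tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    demo_ids = demo_tokenizer.encode("if x:\n    y = 1\n\n\n\n# trailing text")
    print(demo_tokenizer.decode(demo_ids, truncate_before_pattern=["^#", "\n\n\n"]))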
| 25
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
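# The `is_deepspeed_available()` guard above follows the usual optional-dependency
# pattern; a minimal standalone sketch of the same idea (the helper name here is
# illustrative, mirroring the `is_*_available` checks imported from `.imports`):
import importlib.util
def _spec_is_available(package_name: str) -> bool:
    # A package is importable iff its import spec can be found.
    return importlib.util.find_spec(package_name) is not None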
| 289
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
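    # Illustration (not in the original script) of what `_re_checkpoint` matches:
    example = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(example) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]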
| 125
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the value of the Möbius function mu(number)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
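    # Spot-check (standard Möbius values; 1 counts as square-free with zero factors):
    print([mobius(n) for n in (1, 2, 4, 6, 30)])  # expected: [1, -1, 0, 1, -1]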
| 125
| 1
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for `direction`."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into sorted order in `direction`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low+length] in `direction`; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
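    # Caveat demo (illustrative): the recursion above only sorts correctly when
    # the slice length is a power of two, so pad or validate raw input first.
    demo = [3, 1, 4, 1, 5, 9, 2, 6]  # length 8 == 2**3
    bitonic_sort(demo, 0, len(demo), 1)
    print("Power-of-two demo:", demo)  # [1, 1, 2, 3, 4, 5, 6, 9]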
| 68
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" )
with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> List[str]:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any:
'''simple docstring'''
A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
A__ = input_paths[compression_format]
A__ = tmp_path / "cache"
A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict:
'''simple docstring'''
A__ = "custom_cache"
A__ = "custom_extracted_dir"
A__ = tmp_path / "custom_extracted_path"
if default_extracted:
A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) )
A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A__ = xz_file
A__ = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
)
A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
# relative path
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]:
'''simple docstring'''
A__ = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
# relative path
A__ = "./__missing_file__.txt"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
'''simple docstring'''
A__ = get_from_cache(F'tmp://{tmpfs_file}' )
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_head("s3://huggingface.co" )
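def example_cached_path_usage(tmp_path):
    # Illustrative sketch (not a collected test) of the download-and-extract flow
    # the tests above exercise; the archive path here is a made-up placeholder.
    download_config = DownloadConfig(cache_dir=tmp_path / "cache", extract_compressed_file=True)
    return cached_path("path/to/archive.txt.gz", download_config=download_config)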
| 68
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 364
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many rolls of the dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
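    # Sanity check (illustrative): each frequency table covers all equally
    # likely outcomes for its player exactly once.
    assert sum(total_frequency_distribution(sides_number=4, dice_number=9)) == 4**9
    assert sum(total_frequency_distribution(sides_number=6, dice_number=6)) == 6**6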
| 30
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =CanineTokenizer
snake_case_ =False
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
super().setUp()
lowerCAmelCase__ : Dict = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> CanineTokenizer:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCamelCase )
lowerCAmelCase__ : str = 10_24
return tokenizer
@require_torch
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.canine_tokenizer
lowerCAmelCase__ : List[Any] = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowerCAmelCase__ : List[str] = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
lowerCAmelCase__ : Any = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,return_tensors='''pt''' )
self.assertIsInstance(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual((2, 39) ,batch.input_ids.shape )
self.assertEqual((2, 39) ,batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.canine_tokenizer
lowerCAmelCase__ : str = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
lowerCAmelCase__ : Optional[Any] = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' ,__lowerCamelCase )
self.assertIn('''attention_mask''' ,__lowerCamelCase )
self.assertIn('''token_type_ids''' ,__lowerCamelCase )
@require_torch
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.canine_tokenizer
lowerCAmelCase__ : Any = [
'''What\'s the weater?''',
'''It\'s about 25 degrees.''',
]
lowerCAmelCase__ : int = tokenizer(
text_target=__lowerCamelCase ,max_length=32 ,padding='''max_length''' ,truncation=__lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
lowerCAmelCase__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCAmelCase__ : int = tokenizer.__class__.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = after_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : str = tempfile.mkdtemp()
lowerCAmelCase__ : List[Any] = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase__ : List[str] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCAmelCase__ : str = chr(0XE0_07 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase__ : str = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
lowerCAmelCase__ : Tuple = after_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertIn(__lowerCamelCase ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
lowerCAmelCase__ : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(__lowerCamelCase )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : int = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
lowerCAmelCase__ : str = 0XE0_05
lowerCAmelCase__ : Dict = chr(__lowerCamelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCAmelCase__ : Optional[int] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) ,1 )
lowerCAmelCase__ : Any = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : Any = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,input_encoded + special_token_id )
lowerCAmelCase__ : Union[str, Any] = tokenizer.decode(__lowerCamelCase ,skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ : Union[str, Any] = chr(0XE0_05 )
lowerCAmelCase__ : Union[str, Any] = chr(0XE0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowerCAmelCase__ : List[Any] = tokenizer.tokenize(__lowerCamelCase )
lowerCAmelCase__ : int = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) ,1 )
self.assertEqual(len(__lowerCamelCase ) ,1 )
self.assertEqual(token_a[0] ,__lowerCamelCase )
self.assertEqual(token_a[0] ,__lowerCamelCase )
@require_tokenizers
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
lowerCAmelCase__ : List[str] = 0XE0_06
lowerCAmelCase__ : Optional[int] = chr(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ : Tuple = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
lowerCAmelCase__ : Optional[Any] = 0XE0_06
lowerCAmelCase__ : Tuple = chr(__lowerCamelCase )
lowerCAmelCase__ : Tuple = [new_token_a]
lowerCAmelCase__ : int = [new_token_a]
with open(os.path.join(__lowerCamelCase ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(__lowerCamelCase ,__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(__lowerCamelCase ,__lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ : str = tokenizer_class.from_pretrained(__lowerCamelCase ,extra_ids=0 )
self.assertIn(__lowerCamelCase ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
lowerCAmelCase__ : List[Any] = 0XE0_07
lowerCAmelCase__ : Optional[int] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ : Union[str, Any] = [AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase )]
lowerCAmelCase__ : Union[str, Any] = tokenizer_class.from_pretrained(
__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,extra_ids=0 )
self.assertIn(__lowerCamelCase ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ : Dict = '''hello world'''
if self.space_between_special_tokens:
lowerCAmelCase__ : Tuple = '''[CLS] hello world [SEP]'''
else:
lowerCAmelCase__ : List[str] = input
lowerCAmelCase__ : List[str] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer.decode(__lowerCamelCase ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase ,[output, output.lower()] )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ : int = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCAmelCase__ : Any = '''a'''
lowerCAmelCase__ : Dict = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase ,attr + '''_id''' ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,attr + '''_id''' ) ,__lowerCamelCase )
setattr(__lowerCamelCase ,attr + '''_id''' ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase ,attr + '''_id''' ) ,__lowerCamelCase )
setattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[] )
lowerCAmelCase__ : List[str] = 0XE0_06
lowerCAmelCase__ : str = chr(__lowerCamelCase )
setattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ,[additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens''' ) ,[additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[additional_special_token_id] )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
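# Usage sketch (illustrative) of the code-point-based special tokens the tests
# above rely on; requires network access to the Hugging Face Hub, and the expected
# ids assume CANINE maps each character to its Unicode code point.
if __name__ == "__main__":
    demo_tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
    demo_special = chr(0xE005)  # private-use code point
    demo_tokenizer.add_special_tokens({"cls_token": demo_special})
    print(demo_tokenizer.encode(demo_special, add_special_tokens=False))  # expected: [57349]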
| 129
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__snake_case : int =logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = 4
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : Any = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
lowerCAmelCase__ : List[str] = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Optional[Any] = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return (4, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowerCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' ,'''This test is supposed to run on GPU''' )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' ,'''This test is supposed to run on GPU''' )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
model_accelerate.to(__lowerCamelCase )
model_accelerate.eval()
lowerCAmelCase__ : Union[str, Any] = torch.randn(
1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : Dict = noise.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model_accelerate(__lowerCamelCase ,__lowerCamelCase )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase ,low_cpu_mem_usage=__lowerCamelCase )
model_normal_load.to(__lowerCamelCase )
model_normal_load.eval()
lowerCAmelCase__ : List[Any] = model_normal_load(__lowerCamelCase ,__lowerCamelCase )['''sample''']
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[str] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : str = noise.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : str = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase__ : str = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 ) )
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ,__lowerCamelCase=(32, 32) ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 3
lowerCAmelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
lowerCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = self.dummy_input
lowerCAmelCase__ : Tuple = floats_tensor((4, 3) + (2_56, 2_56) ).to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = noise
lowerCAmelCase__ : Union[str, Any] = model(**__lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Dict = 4
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : List[Any] = (2_56, 2_56)
lowerCAmelCase__ : str = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Optional[Any] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Dict = 3
lowerCAmelCase__ : str = (32, 32)
lowerCAmelCase__ : Tuple = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
pass
| 129
| 1
|
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """
    Encode raw bytes as an uppercase base16 (hex) string.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in data])
def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase base16 (hex) string back into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
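    # Round-trip sketch (byte values straight from the ASCII table: H=0x48, i=0x69, !=0x21):
    encoded = base16_encode(b"Hi!")
    print(encoded)  # 486921
    assert base16_decode(encoded) == b"Hi!"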
| 356
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput ):
    """simple docstring"""
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , num_attention_heads = 32 , num_layers = 20 , embedding_dim = 768 , num_embeddings=77 , additional_embeddings=4 , dropout = 0.0 , time_embed_act_fn = "silu" , norm_in_type = None , embedding_proj_norm_type = None , encoder_hid_proj_type = "linear" , added_emb_type = "prd" , time_embed_dim = None , embedding_proj_dim = None , clip_embed_dim = None , attention_head_dim = 64 , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim , True , 0 )
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn )
        self.proj_in = nn.Linear(embedding_dim , inner_dim )
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim )
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim )
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim )
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim ) )
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim ) )
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn='gelu' , attention_bias=True , )
                for d in range(num_layers )
            ] )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim )
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
        self.norm_out = nn.LayerNorm(inner_dim )
        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim )
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
        causal_attention_mask.triu_(1 )
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' , causal_attention_mask , persistent=False )
        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim ) )
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self ) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        processors = {}
        def fn_recursive_add_processors(name , module , processors ):
            if hasattr(module , 'set_processor' ):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""" , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor( self , processor ):
        '''simple docstring'''
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor )} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(name , module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(f"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""" , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor( self ):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
    def forward( self , hidden_states , timestep , proj_embedding , encoder_hidden_states = None , attention_mask = None , return_dict = True , ):
        '''simple docstring'''
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device )
        timesteps_projected = self.time_proj(timesteps )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype )
        time_embeddings = self.time_embedding(timesteps_projected )
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding )
        proj_embeddings = self.embedding_proj(proj_embedding )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
        hidden_states = self.proj_in(hidden_states )
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype )
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape ) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype ).expand(batch_size , -1 , -1 )
            additional_embeds.append(prd_embedding )
        hidden_states = torch.cat(
            additional_embeds , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0 )
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states )
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask )
        hidden_states = self.norm_out(hidden_states )
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding )
    def post_process_latents( self , prior_latents ):
        '''simple docstring'''
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
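# Minimal smoke test (a sketch, not from the original module; the tiny config values are assumed
# examples chosen so the shapes line up: 7 + 4 positional tokens vs. 7 encoder tokens + proj +
# time + hidden + prd token):
#   prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2,
#                            embedding_dim=8, num_embeddings=7, additional_embeddings=4)
#   out = prior(hidden_states=torch.randn(1, 8), timestep=torch.tensor([1.0]),
#               proj_embedding=torch.randn(1, 8), encoder_hidden_states=torch.randn(1, 7, 8))
#   assert out.predicted_image_embedding.shape == (1, 8)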
| 348
| 0
|
"""simple docstring"""
def check_bouncy( num : int ) -> bool:
    if not isinstance(num , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(num )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent : float = 99 ) -> int:
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""")
| 347
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
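# Example (illustrative): floats_list((2, 3)) returns a 2x3 nested list of floats
# drawn uniformly from [0.0, scale), seeded by `global_rng` above.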
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1_0 , hop_length=1_6_0 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_0_0_0 , return_attention_mask=False , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = feat_extract_first.mel_filters
            mel_second = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(mel_first , mel_second ) )
            self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = feat_extract_first.mel_filters
            mel_second = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(mel_first , mel_second ) )
            self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_double_precision_pad( self ):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
        self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
| 104
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
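# Illustrative CLI invocation (a sketch; the config path is an assumed example):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml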
| 105
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles( path , articles ):
    '''simple docstring'''
    content = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )
def make_test_data_dir( tmp_dir ):
    '''simple docstring'''
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f"""{split}.source""" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f"""{split}.target""" ) , SUMMARIES )
    return tmp_dir
class TestAll(TestCasePlus ):
    '''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self , tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = '''ro_RO''', '''de_DE'''  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='''train''' , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch , dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self , tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='''train''' , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('''train.source''').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer , tmp_dir , 128 , save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('''train.source''').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['''input_ids'''].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['''input_ids'''].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"""too many tokens in {len(failures)} batches""")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False)
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2)
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler)
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k='''input_ids'''):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='''labels''')) < sum(count_pad_tokens(naive_dl , k='''labels'''))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self , n_obs=1_000 , max_len=128):
        if os.getenv('''USE_REAL_DATA''' , False):
            data_dir = '''examples/seq2seq/wmt_en_ro'''
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('''train.len''').exists():
                save_len_file(MARIAN_TINY , data_dir)
        else:
            data_dir = '''examples/seq2seq/test_data/wmt_en_ro'''
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer , data_dir=data_dir , type_path='''train''' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=0 , add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=1 , add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self , tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
        assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 105
| 1
|
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
# note: `map` and `filter` intentionally shadow the built-ins here; each timed
# helper is named after the dataset operation it benchmarks.
@get_duration
def map(dataset : datasets.Dataset, **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset : datasets.Dataset, **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow''' ), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset, batched=True )
        times['''map no-op batched'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''torch''', columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset, function=lambda x : None, batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset, function=tokenize, batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 125
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 125
| 1
|
def euclidean_distance_sqr( point1 , point2 ):
    '''simple docstring'''
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ):
    '''simple docstring'''
    return sorted(array , key=lambda point : point[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float('''inf''' ) ):
    '''simple docstring'''
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float('''inf''' ) ):
    '''simple docstring'''
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    '''simple docstring'''
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
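# For the sample points above the closest pair is (2, 3) and (3, 4), so the script
# should print: Distance: 1.4142135623730951 (i.e. sqrt(2)).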
| 235
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp( self ) -> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''np''' ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8_192)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.2385, -1.0987, -1.0108] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21_841)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.6881, -0.2787, 0.5901] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
| 235
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
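# For the sample scores above (a complete binary tree of height log2(8) = 3, with the
# maximizer moving first), the game value works out to max(min(90, 33), ...) = 65,
# so the script prints: Optimal value : 65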
| 80
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__a = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device( module , tensor_name , device , value=None , fp16_statistics=None ):
    '''simple docstring'''
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F'''{module} has no attribute {split}.''' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )
    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                new_value = torch.tensor(value , device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , '''SCB''' , fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear( model , modules_to_not_convert=None , current_key_name=None , quantization_config=None , has_been_replaced=False ):
    '''simple docstring'''
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def a ( snake_case__: Any , snake_case__: Any=None , snake_case__: Union[str, Any]=None , snake_case__: str=None ):
'''simple docstring'''
lowercase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowercase_ , lowercase_ = _replace_with_bnb_linear(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def a ( *snake_case__: str , **snake_case__: Dict ):
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , )
return replace_with_bnb_linear(*snake_case__ , **snake_case__ )
def a ( *snake_case__: Any , **snake_case__: List[Any] ):
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , )
return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ )
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = deepcopy(snake_case__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
lowercase_ = find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
lowercase_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase_ = sum(snake_case__ , [] )
lowercase_ = len(snake_case__ ) > 0
# Check if it is a base model
lowercase_ = not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase_ = list(model.named_children() )
lowercase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase_ = set(snake_case__ ) - set(snake_case__ )
lowercase_ = list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
lowercase_ = ['''.weight''', '''.bias''']
lowercase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase_ = name.replace(snake_case__ , '''''' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
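# Usage sketch (assumed flow, names for illustration): the helper above returns module
# names that should stay in full precision -- typically the output head and any weights
# tied to it -- which are then excluded from the bitsandbytes replacement pass:
#   keys_to_skip = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#   model = replace_with_bnb_linear(model, modules_to_not_convert=keys_to_skip)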
| 30
| 0
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a ( unittest.TestCase ):
def A_ ( self : Union[str, Any] , lowercase_ : List[str] ):
snake_case_ = 3
snake_case_ = 250
snake_case_ = ids_tensor((batch_size, length) , lowercase_ )
snake_case_ = torch.ones((batch_size, length) , device=lowercase_ , dtype=torch.float ) / length
return input_ids, scores
def A_ ( self : Optional[int] ):
snake_case_ ,snake_case_ = self._get_tensors(5 )
snake_case_ = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ ,snake_case_ = self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ ,snake_case_ = self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def A_ ( self : List[str] ):
snake_case_ = MaxLengthCriteria(max_length=10 )
snake_case_ ,snake_case_ = self._get_tensors(5 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ ,snake_case_ = self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ ,snake_case_ = self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def A_ ( self : Any ):
snake_case_ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
snake_case_ ,snake_case_ = self._get_tensors(5 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ ,snake_case_ = self._get_tensors(9 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ ,snake_case_ = self._get_tensors(10 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
snake_case_ = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def A_ ( self : Optional[int] ):
snake_case_ ,snake_case_ = self._get_tensors(5 )
snake_case_ = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowercase_ , lowercase_ ) )
snake_case_ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowercase_ , lowercase_ ) )
def A_ ( self : List[str] ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowercase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
snake_case_ = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowercase_ ) , 1 )
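# Illustrative semantics (a sketch, not part of the suite): a StoppingCriteriaList fires
# as soon as any member returns True, so MaxLengthCriteria(max_length=10) stops once
# input_ids.shape[-1] reaches 10, and MaxTimeCriteria(max_time=0.1) stops once 0.1
# seconds have elapsed since its initial timestamp.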
| 72
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
a : str = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a ( _lowerCamelCase ):
def __init__( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int]=None , lowercase_ : str=1 ):
snake_case_ = tokenizer
snake_case_ = dataset
snake_case_ = len(lowercase_ ) if n_tasks is None else n_tasks
snake_case_ = n_copies
def __iter__( self : Optional[Any] ):
snake_case_ = []
for task in range(self.n_tasks ):
# without the strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
snake_case_ = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a ( _lowerCamelCase ):
def __init__( self : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int ):
snake_case_ = start_length
snake_case_ = eof_strings
snake_case_ = tokenizer
def __call__( self : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , **lowercase_ : List[str] ):
snake_case_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
snake_case_ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowercase_ )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = re.split('''(%s)''' % '''|'''.join(__UpperCAmelCase ), __UpperCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=20, **__UpperCAmelCase ) -> str:
'''simple docstring'''
snake_case_ = defaultdict(__UpperCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__UpperCAmelCase ) ):
with torch.no_grad():
snake_case_ = batch['''ids'''].shape[-1]
snake_case_ = accelerator.unwrap_model(__UpperCAmelCase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=__UpperCAmelCase, **__UpperCAmelCase )
# each task is generated batch_size times
snake_case_ = batch['''task_id'''].repeat(__UpperCAmelCase )
snake_case_ = accelerator.pad_across_processes(
__UpperCAmelCase, dim=1, pad_index=tokenizer.pad_token_id )
snake_case_ ,snake_case_ = accelerator.gather((generated_tokens, generated_tasks) )
snake_case_ = generated_tokens.cpu().numpy()
snake_case_ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__UpperCAmelCase, __UpperCAmelCase ):
gen_token_dict[task].append(__UpperCAmelCase )
snake_case_ = [[] for _ in range(__UpperCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
snake_case_ = tokenizer.decode(__UpperCAmelCase, skip_special_tokens=__UpperCAmelCase, clean_up_tokenization_spaces=__UpperCAmelCase )
code_gens[task].append(remove_last_block(__UpperCAmelCase ) )
return code_gens
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
snake_case_ = HfArgumentParser(__UpperCAmelCase )
snake_case_ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
snake_case_ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
snake_case_ = '''false'''
if args.num_workers is None:
snake_case_ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
snake_case_ = Accelerator()
set_seed(args.seed, device_specific=__UpperCAmelCase )
# Load model and tokenizer
snake_case_ = AutoTokenizer.from_pretrained(args.model_ckpt )
snake_case_ = tokenizer.eos_token
snake_case_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
snake_case_ = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, __UpperCAmelCase, __UpperCAmelCase )] ),
}
# Load evaluation dataset and metric
snake_case_ = load_dataset('''openai_humaneval''' )
snake_case_ = load_metric('''code_eval''' )
snake_case_ = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
snake_case_ = args.n_samples // args.batch_size
snake_case_ = TokenizedDataset(__UpperCAmelCase, human_eval['''test'''], n_copies=__UpperCAmelCase, n_tasks=__UpperCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
snake_case_ = DataLoader(__UpperCAmelCase, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
snake_case_ = code_eval_metric.compute(references=[''''''], predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
snake_case_ ,snake_case_ = accelerator.prepare(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = complete_code(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, n_tasks=__UpperCAmelCase, batch_size=args.batch_size, **__UpperCAmelCase, )
if accelerator.is_main_process:
snake_case_ = []
for task in tqdm(range(__UpperCAmelCase ) ):
snake_case_ = human_eval['''test'''][task]['''test''']
snake_case_ = F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
snake_case_ ,snake_case_ = code_eval_metric.compute(
references=__UpperCAmelCase, predictions=__UpperCAmelCase, num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file, '''w''' ) as fp:
json.dump(__UpperCAmelCase, __UpperCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 72
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """deta"""
_SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : str , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=9_0_0 , UpperCamelCase__ : str=2_0_4_8 , UpperCamelCase__ : int=6 , UpperCamelCase__ : List[Any]=2_0_4_8 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : Optional[int]=6 , UpperCamelCase__ : Optional[int]=1_0_2_4 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Any="relu" , UpperCamelCase__ : Tuple=2_5_6 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=0.0_2 , UpperCamelCase__ : Optional[Any]=1.0 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int="sine" , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_0_0 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : str=5 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : int=1 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=0.2_5 , **UpperCamelCase__ : int , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
UpperCamelCase = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = backbone_config.pop('model_type' )
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(UpperCamelCase__ )
UpperCamelCase = backbone_config
UpperCamelCase = num_queries
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = init_xavier_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = auxiliary_loss
UpperCamelCase = position_embedding_type
# deformable attributes
UpperCamelCase = num_feature_levels
UpperCamelCase = encoder_n_points
UpperCamelCase = decoder_n_points
UpperCamelCase = two_stage
UpperCamelCase = two_stage_num_proposals
UpperCamelCase = with_box_refine
UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
UpperCamelCase = class_cost
UpperCamelCase = bbox_cost
UpperCamelCase = giou_cost
# Loss coefficients
UpperCamelCase = mask_loss_coefficient
UpperCamelCase = dice_loss_coefficient
UpperCamelCase = bbox_loss_coefficient
UpperCamelCase = giou_loss_coefficient
UpperCamelCase = eos_coefficient
UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def A ( self : List[str] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A ( self : Tuple ):
"""simple docstring"""
return self.d_model
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
| 28
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__snake_case = 4
__snake_case = 3
class __snake_case ( lowerCamelCase__ ):
pass
def lowerCAmelCase_ ( __lowerCAmelCase )-> List[str]:
'''simple docstring'''
for shard in shards:
for i in range(__lowerCAmelCase ):
yield {"i": i, "shard": shard}
def lowerCAmelCase_ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] =int(os.environ['''RANK'''] )
UpperCAmelCase : Optional[Any] =int(os.environ['''WORLD_SIZE'''] )
UpperCAmelCase : List[Any] =ArgumentParser()
parser.add_argument('''--streaming''' , type=__lowerCAmelCase )
parser.add_argument('''--local_rank''' , type=__lowerCAmelCase )
parser.add_argument('''--num_workers''' , type=__lowerCAmelCase , default=0 )
UpperCAmelCase : Any =parser.parse_args()
UpperCAmelCase : List[str] =args.streaming
UpperCAmelCase : Tuple =args.num_workers
UpperCAmelCase : int ={'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(__lowerCAmelCase )]}
UpperCAmelCase : Optional[int] =IterableDataset.from_generator(__lowerCAmelCase , gen_kwargs=__lowerCAmelCase )
if not streaming:
UpperCAmelCase : List[Any] =Dataset.from_list(list(__lowerCAmelCase ) )
UpperCAmelCase : Dict =split_dataset_by_node(__lowerCAmelCase , rank=__lowerCAmelCase , world_size=__lowerCAmelCase )
UpperCAmelCase : List[Any] =torch.utils.data.DataLoader(__lowerCAmelCase , num_workers=__lowerCAmelCase )
UpperCAmelCase : Dict =NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCAmelCase : str =full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
UpperCAmelCase : List[Any] =sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
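# Worked size check (illustration): with NUM_SHARDS=4 shards of NUM_ITEMS_PER_SHARD=3
# rows each, full_size is 12; for world_size=2 every rank expects 12 // 2 = 6 rows, and
# any remainder is absorbed by the lowest ranks via the `rank < (full_size % world_size)`
# correction above.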
if __name__ == "__main__":
main()
| 348
| 0
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __snake_case( _lowerCAmelCase ) -> Any:
snake_case__ : Optional[int] = tmp_path / """file.csv"""
snake_case__ : Dict = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(_lowerCAmelCase , """w""" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def __snake_case( _lowerCAmelCase ) -> List[str]:
snake_case__ : Any = tmp_path / """malformed_file.csv"""
snake_case__ : List[str] = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(_lowerCAmelCase , """w""" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
snake_case__ : Optional[int] = tmp_path / """csv_with_image.csv"""
snake_case__ : int = textwrap.dedent(
f"""\
image
{image_file}
""" )
with open(_lowerCAmelCase , """w""" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : Dict = tmp_path / """csv_with_label.csv"""
snake_case__ : Union[str, Any] = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(_lowerCAmelCase , """w""" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def __snake_case( _lowerCAmelCase ) -> List[Any]:
snake_case__ : str = tmp_path / """csv_with_int_list.csv"""
snake_case__ : List[Any] = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(_lowerCAmelCase , """w""" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
snake_case__ : Tuple = Csv()
snake_case__ : Tuple = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
snake_case__ : List[Any] = f.read().splitlines()[1]
snake_case__ : int = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
snake_case__ : Tuple = csv._generate_tables([[csv_file_with_image]] )
snake_case__ : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
snake_case__ : Tuple = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def __snake_case( _lowerCAmelCase ) -> str:
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
snake_case__ : Any = f.read().splitlines()[1:]
snake_case__ : Tuple = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
snake_case__ : Any = csv._generate_tables([[csv_file_with_label]] )
snake_case__ : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
snake_case__ : Optional[Any] = pa_table.to_pydict()["""label"""]
assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(label ) for label in labels]
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : Dict = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x : [int(i) for i in x.split()]} )
snake_case__ : Any = csv._generate_tables([[csv_file_with_int_list]] )
snake_case__ : int = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
snake_case__ : Dict = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 43
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = DDIMPipeline
lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase = False
def lowerCamelCase ( self : List[str] ):
torch.manual_seed(0 )
snake_case__ : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
snake_case__ : Optional[Any] = DDIMScheduler()
snake_case__ : Any = {"""unet""": unet, """scheduler""": scheduler}
return components
def lowerCamelCase ( self : List[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0 ):
if str(snake_case_ ).startswith("""mps""" ):
snake_case__ : str = torch.manual_seed(snake_case_ )
else:
snake_case__ : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
snake_case__ : Union[str, Any] = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = """cpu"""
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
snake_case__ : str = pipe(**snake_case_ ).images
snake_case__ : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
snake_case__ : Union[str, Any] = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4] )
snake_case__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
def lowerCamelCase ( self : List[str] ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase ( self : Tuple ):
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCamelCase ( self : Optional[int] ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCamelCase ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[Any] = """google/ddpm-cifar10-32"""
snake_case__ : Optional[Any] = UNetaDModel.from_pretrained(snake_case_ )
snake_case__ : List[Any] = DDIMScheduler()
snake_case__ : List[str] = DDIMPipeline(unet=snake_case_ , scheduler=snake_case_ )
ddim.to(snake_case_ )
ddim.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = ddim(generator=snake_case_ , eta=0.0 , output_type="""numpy""" ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Optional[Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Dict = """google/ddpm-ema-bedroom-256"""
snake_case__ : Dict = UNetaDModel.from_pretrained(snake_case_ )
snake_case__ : Optional[int] = DDIMScheduler.from_pretrained(snake_case_ )
snake_case__ : Tuple = DDIMPipeline(unet=snake_case_ , scheduler=snake_case_ )
ddpm.to(snake_case_ )
ddpm.set_progress_bar_config(disable=snake_case_ )
snake_case__ : List[Any] = torch.manual_seed(0 )
snake_case__ : int = ddpm(generator=snake_case_ , output_type="""numpy""" ).images
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case__ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 43
| 1
|
"""simple docstring"""
class __A :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ) ->str:
"""simple docstring"""
snake_case_ = name
snake_case_ = value
snake_case_ = weight
def __repr__( self : Any ) ->Optional[int]:
"""simple docstring"""
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
return self.value
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
return self.name
def lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
return self.weight
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return self.value / self.weight
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = []
for i in range(len(A__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = sorted(A__ , key=A__ , reverse=A__ )
snake_case_ = []
snake_case_ , snake_case_ = 0.0, 0.0
for i in range(len(A__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
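# Worked example (illustrative values): with max_cost=50 and items of weight/value
# (10, 60), (20, 100), (30, 120) already sorted by the supplied key, the greedy pass
# takes the first two (total weight 30, value 160) and skips the third, whose weight
# would push the total to 60 > 50.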
def _a ( ) -> List[str]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _A ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=A__ , exist_ok=A__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , use_external_data_format=A__ , enable_onnx_checker=A__ , opset_version=A__ , )
else:
export(
A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , opset_version=A__ , )
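# Note (sketch): onnx_export above is a thin wrapper around torch.onnx.export that only
# forwards the deprecated `use_external_data_format` / `enable_onnx_checker` flags when
# running on torch < 1.11, where they still exist.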
@torch.no_grad()
def _A ( A__ , A__ , A__ , A__ = False ):
"""simple docstring"""
__lowercase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowercase = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
__lowercase = '''cpu'''
__lowercase = Path(A__ )
# VAE DECODER
__lowercase = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
__lowercase = vae_decoder.config.latent_channels
# forward only through the decoder part
__lowercase = vae_decoder.decode
onnx_export(
A__ , model_args=(
torch.randn(1 , A__ , 25 , 25 ).to(device=A__ , dtype=A__ ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=A__ , )
del vae_decoder
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
lowerCAmelCase__ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
| 104
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['MaskFormerFeatureExtractor']
__UpperCAmelCase = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCAmelCase = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 354
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index != 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
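# Usage sketch (assumed multi-process context): because `main_process_only` is the first
# positional argument, callers pass the iterable after it, e.g. `tqdm(True, dataloader)`;
# non-main processes then receive `disable=True`, so only local rank 0 renders a bar.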
| 1
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a__ = logging.get_logger(__name__)
# General docstring
a__ = '''MobileNetV1Config'''
# Base docstring
a__ = '''google/mobilenet_v1_1.0_224'''
a__ = [1, 1024, 7, 7]
# Image classification docstring
a__ = '''google/mobilenet_v1_1.0_224'''
a__ = '''tabby, tabby cat'''
a__ = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ,__a : int=None ) -> Tuple:
"""simple docstring"""
_a : Optional[int] = {}
if isinstance(__a ,__a ):
_a : Tuple = model.mobilenet_va
else:
_a : Optional[Any] = model
_a : List[Any] = '''MobilenetV1/Conv2d_0/'''
_a : int = backbone.conv_stem.convolution.weight
_a : Optional[int] = backbone.conv_stem.normalization.bias
_a : List[str] = backbone.conv_stem.normalization.weight
_a : Optional[Any] = backbone.conv_stem.normalization.running_mean
_a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_a : Optional[int] = i + 1
_a : int = i * 2
_a : Union[str, Any] = backbone.layer[pt_index]
_a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
_a : List[Any] = pointer.convolution.weight
_a : Tuple = pointer.normalization.bias
_a : Union[str, Any] = pointer.normalization.weight
_a : str = pointer.normalization.running_mean
_a : List[Any] = pointer.normalization.running_var
_a : Optional[int] = backbone.layer[pt_index + 1]
_a : List[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
_a : Tuple = pointer.convolution.weight
_a : Any = pointer.normalization.bias
_a : List[Any] = pointer.normalization.weight
_a : int = pointer.normalization.running_mean
_a : List[Any] = pointer.normalization.running_var
if isinstance(__a ,__a ):
_a : Optional[int] = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
_a : int = model.classifier.weight
_a : Union[str, Any] = model.classifier.bias
return tf_to_pt_map
def __UpperCAmelCase ( __a : Optional[Any] ,__a : List[str] ,__a : Optional[Any] ) -> Tuple:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
_a : Optional[int] = tf.train.list_variables(__a )
_a : int = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
_a : str = tf.train.load_variable(__a ,__a )
_a : int = array
# Build TF to PyTorch weights loading map
_a : List[str] = _build_tf_to_pytorch_map(__a ,__a ,__a )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
_a : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
_a : int = np.transpose(__a ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
_a : Optional[int] = array.squeeze().transpose()
else:
_a : List[str] = np.transpose(__a ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
_a : Dict = torch.from_numpy(__a )
tf_weights.pop(__a ,__a )
tf_weights.pop(name + '''/RMSProp''' ,__a )
tf_weights.pop(name + '''/RMSProp_1''' ,__a )
tf_weights.pop(name + '''/ExponentialMovingAverage''' ,__a )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
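# Note on the transposes above (TF vs. PyTorch layout, illustration): TF stores standard
# conv kernels as (H, W, in, out) while PyTorch expects (out, in, H, W), hence the
# (3, 2, 0, 1) permutation; depthwise kernels use (2, 3, 0, 1) instead, and 2-D arrays
# feeding linear layers are simply squeezed and transposed.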
def __UpperCAmelCase ( __a : torch.Tensor ,__a : nn.Convad ) -> torch.Tensor:
"""simple docstring"""
_a , _a : int = features.shape[-2:]
_a , _a : Tuple = conv_layer.stride
_a , _a : Optional[int] = conv_layer.kernel_size
if in_height % stride_height == 0:
_a : Dict = max(kernel_height - stride_height ,0 )
else:
_a : int = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
_a : Optional[Any] = max(kernel_width - stride_width ,0 )
else:
_a : List[str] = max(kernel_width - (in_width % stride_width) ,0 )
_a : Any = pad_along_width // 2
_a : Optional[Any] = pad_along_width - pad_left
_a : List[Any] = pad_along_height // 2
_a : Any = pad_along_height - pad_top
_a : List[Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__a ,__a ,'''constant''' ,0.0 )
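# Worked padding example (illustration): for in_height=7, stride_height=2, kernel_height=3
# the branch above gives pad_along_height = max(3 - (7 % 2), 0) = 2, split as pad_top=1
# and pad_bottom=1 -- matching TensorFlow's "SAME" convention.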
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self , _a , _a , _a , _a , _a = 1 , _a = 1 , _a = False , _a = True , _a = True , ) -> None:
super().__init__()
_a : List[str] = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
_a : str = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_a : Dict = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , stride=_a , padding=_a , groups=_a , bias=_a , padding_mode='''zeros''' , )
if use_normalization:
_a : Dict = nn.BatchNormad(
num_features=_a , eps=config.layer_norm_eps , momentum=0.9997 , affine=_a , track_running_stats=_a , )
else:
_a : Optional[int] = None
if use_activation:
if isinstance(_a , _a ):
_a : Any = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _a ):
_a : str = ACTaFN[config.hidden_act]
else:
_a : Optional[Any] = config.hidden_act
else:
_a : List[str] = None
def __lowercase ( self , _a ) -> torch.Tensor:
if self.config.tf_padding:
_a : int = apply_tf_padding(_a , self.convolution )
_a : Dict = self.convolution(_a )
if self.normalization is not None:
_a : Any = self.normalization(_a )
if self.activation is not None:
_a : Any = self.activation(_a )
return features
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = MobileNetVaConfig
UpperCAmelCase__ : Dict = load_tf_weights_in_mobilenet_va
UpperCAmelCase__ : Any = "mobilenet_v1"
UpperCAmelCase__ : str = "pixel_values"
UpperCAmelCase__ : Union[str, Any] = False
def __lowercase ( self , _a ) -> None:
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
a__ = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a__ = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , __lowercase , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a = True ) -> Dict:
super().__init__(_a )
_a : Union[str, Any] = config
_a : Optional[int] = 3_2
_a : int = max(int(depth * config.depth_multiplier ) , config.min_depth )
_a : Tuple = MobileNetVaConvLayer(
_a , in_channels=config.num_channels , out_channels=_a , kernel_size=3 , stride=2 , )
_a : Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_a : Optional[Any] = nn.ModuleList()
for i in range(1_3 ):
_a : str = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=3 , stride=strides[i] , groups=_a , ) )
self.layer.append(
MobileNetVaConvLayer(
_a , in_channels=_a , out_channels=_a , kernel_size=1 , ) )
_a : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __lowercase ( self , _a ) -> Dict:
raise NotImplementedError
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowercase ( self , _a = None , _a = None , _a = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
_a : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
_a : str = self.conv_stem(_a )
_a : List[str] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_a : Union[str, Any] = layer_module(_a )
if output_hidden_states:
_a : List[str] = all_hidden_states + (hidden_states,)
_a : Any = hidden_states
if self.pooler is not None:
_a : Dict = torch.flatten(self.pooler(_a ) , start_dim=1 )
else:
_a : str = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=_a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __lowercase , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a ) -> None:
super().__init__(_a )
_a : int = config.num_labels
_a : List[str] = MobileNetVaModel(_a )
_a : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_a : List[str] = nn.Dropout(config.classifier_dropout_prob , inplace=_a )
_a : Tuple = nn.Linear(_a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowercase ( self , _a = None , _a = None , _a = None , _a = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
_a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Any = self.mobilenet_va(_a , output_hidden_states=_a , return_dict=_a )
_a : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
_a : Dict = self.classifier(self.dropout(_a ) )
_a : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : Tuple = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : Optional[int] = '''single_label_classification'''
else:
_a : Union[str, Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
_a : Dict = MSELoss()
if self.num_labels == 1:
_a : Tuple = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a : Union[str, Any] = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
_a : Optional[Any] = CrossEntropyLoss()
_a : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : Union[str, Any] = BCEWithLogitsLoss()
_a : List[Any] = loss_fct(_a , _a )
if not return_dict:
_a : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , )
| 235
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (DDPMScheduler,)
def __lowercase ( self , **_a ) -> Any:
_a : List[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_a )
return config
def __lowercase ( self ) -> Any:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_a )
def __lowercase ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __lowercase ( self ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __lowercase ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __lowercase ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __lowercase ( self ) -> Dict:
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __lowercase ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __lowercase ( self ) -> int:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=_a )
def __lowercase ( self ) -> int:
_a : int = self.scheduler_classes[0]
_a : List[Any] = self.get_scheduler_config()
_a : Dict = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __lowercase ( self ) -> Tuple:
_a : int = self.scheduler_classes[0]
_a : int = self.get_scheduler_config()
_a : int = scheduler_class(**_a )
_a : Optional[int] = len(_a )
_a : Optional[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
_a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
_a : str = model(_a , _a )
# 2. predict previous mean of sample x_t-1
_a : Optional[int] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a : List[Any] = pred_prev_sample
_a : str = torch.sum(torch.abs(_a ) )
_a : Optional[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_a : Union[str, Any] = scheduler_class(**_a )
_a : Dict = len(_a )
_a : int = self.dummy_model()
_a : Tuple = self.dummy_sample_deter
_a : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
_a : Dict = model(_a , _a )
# 2. predict previous mean of sample x_t-1
_a : int = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_a : str = pred_prev_sample
_a : str = torch.sum(torch.abs(_a ) )
_a : Tuple = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.scheduler_classes[0]
_a : Tuple = self.get_scheduler_config()
_a : Any = scheduler_class(**_a )
_a : Optional[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=_a )
_a : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
_a : Dict = -1
else:
_a : Tuple = timesteps[i + 1]
_a : Optional[Any] = scheduler.previous_timestep(_a )
_a : Optional[Any] = prev_t.item()
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**_a )
_a : str = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(_a , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def __lowercase ( self ) -> str:
_a : List[str] = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config()
_a : Dict = scheduler_class(**_a )
_a : Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_a : Optional[Any] = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __lowercase ( self ) -> Optional[int]:
_a : Dict = self.scheduler_classes[0]
_a : Union[str, Any] = self.get_scheduler_config()
_a : int = scheduler_class(**_a )
_a : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 235
| 1
|
'''simple docstring'''
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = 1
__lowerCAmelCase = 2
while i * i <= n:
__lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
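# Worked example: 28 = 2^2 * 7, so the loop records multiplicities 2 and 1 and the
# function returns (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.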
def __magic_name__( ):
__lowerCAmelCase = 1
__lowerCAmelCase = 1
while True:
i += 1
t_num += i
if count_divisors(lowerCamelCase) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
| 364
|
'''simple docstring'''
import numpy as np
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 1E-12, lowerCamelCase = 1_0_0, ):
assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[1]
# Ensure proper dimensionality.
assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCamelCase) == np.iscomplexobj(lowerCamelCase)
__lowerCAmelCase = np.iscomplexobj(lowerCamelCase)
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCamelCase, input_matrix.conj().T)
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
__lowerCAmelCase = np.dot(lowerCamelCase, lowerCamelCase)
# Normalize the resulting output vector.
__lowerCAmelCase = w / np.linalg.norm(lowerCamelCase)
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__lowerCAmelCase = vector.conj().T if is_complex else vector.T
__lowerCAmelCase = np.dot(lowerCamelCase, np.dot(lowerCamelCase, lowerCamelCase))
# Check convergence.
__lowerCAmelCase = np.abs(lambda_ - lambda_previous) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowerCAmelCase = True
__lowerCAmelCase = lambda_
if is_complex:
__lowerCAmelCase = np.real(lambda_)
return lambda_, vector
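# Usage sketch (illustrative 2x2 system): for the symmetric matrix [[2, 1], [1, 2]] with
# eigenvalues 3 and 1, power iteration from a non-degenerate start vector converges to
# the dominant pair: eigenvalue ~3.0 and eigenvector ~[0.707, 0.707] up to sign.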
def __magic_name__( ):
__lowerCAmelCase = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]])
__lowerCAmelCase = np.array([4_1, 4, 2_0])
__lowerCAmelCase = real_input_matrix.astype(np.complexaaa)
__lowerCAmelCase = np.triu(1J * complex_input_matrix, 1)
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__lowerCAmelCase = np.array([4_1, 4, 2_0]).astype(np.complexaaa)
for problem_type in ["real", "complex"]:
if problem_type == "real":
__lowerCAmelCase = real_input_matrix
__lowerCAmelCase = real_vector
elif problem_type == "complex":
__lowerCAmelCase = complex_input_matrix
__lowerCAmelCase = complex_vector
# Our implementation.
__lowerCAmelCase , __lowerCAmelCase = power_iteration(lowerCamelCase, lowerCamelCase)
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(lowerCamelCase)
# Last eigenvalue is the maximum one.
__lowerCAmelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__lowerCAmelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowerCamelCase) - np.abs(lowerCamelCase)) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 9
| 0
|
"""simple docstring"""
lowerCAmelCase__ = [0, 2, 4, 6, 8]
lowerCAmelCase__ = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length that are consistent with
    the digit pairs fixed so far, filling pairs from the outside in.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result
def solution(max_power: int = 9) -> int:
    """
    Count the reversible numbers below 10**max_power.

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(A_ ):
if len(A_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(A_ ) )
return data_lists
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : list[list[float]] = []
for dlist, weight in zip(A_, A_ ):
_lowerCamelCase : Any = min(A_ )
_lowerCamelCase : Optional[Any] = max(A_ )
_lowerCamelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCamelCase : str = F'''Invalid weight of {weight:f} provided'''
raise ValueError(A_ )
score_lists.append(A_ )
return score_lists
def snake_case_ ( A_ : list[list[float]] ):
'''simple docstring'''
_lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(A_ ):
_lowerCamelCase : List[str] = final_scores[j] + ele
return final_scores
def snake_case_ ( A_ : list[list[float]], A_ : list[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = get_data(A_ )
_lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ )
_lowerCamelCase : str = generate_final_scores(A_ )
# append scores to source data
for i, ele in enumerate(A_ ):
source_data[i].append(A_ )
return source_data
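# A minimal usage sketch (values chosen for illustration; the outer function
# name procentual_proximity is kept from the original algorithm):
#
#     vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     weights = [0, 0, 1]  # minimize first two columns, maximize the year
#     procentual_proximity(vehicles, weights)
#     # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333...]]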
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
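# A minimal usage sketch of the model exercised by these tests (hedged; the
# generation settings below are illustrative, not the test configuration):
#
#     from transformers import BioGptForCausalLM, BioGptTokenizer
#     tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#     model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#     inputs = tokenizer("COVID-19 is", return_tensors="pt")
#     ids = model.generate(**inputs, max_length=50, num_beams=5, early_stopping=True)
#     print(tokenizer.decode(ids[0], skip_special_tokens=True))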
def euclidean_gcd(a: int, b: int) -> int:
    """Compute the greatest common divisor of a and b iteratively."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Compute the greatest common divisor of a and b recursively."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
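# Worked example of the invariant gcd(a, b) == gcd(b, a % b) that both
# versions rely on: gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6.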
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into one sentence per line (used for ROUGE-Lsum style scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
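# A minimal usage sketch (hedged; requires network access to the Hub):
#
#     from transformers import BloomTokenizerFast
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.decode(ids))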
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if the string can be segmented into a space-separated
    sequence of one or more of the given words.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
    False
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
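# Design note: the trie prunes impossible prefixes early, while
# functools.cache memoizes is_breakable per start index, so each index is
# expanded at most once. A quick sanity check:
#
#     word_break("applepenapple", ["apple", "pen"])                    # True
#     word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])   # False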
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get the
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance) == add
        # variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
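# A minimal usage sketch (hedged; shapes and the model output are stand-ins
# for a real UNet prediction):
#
#     import jax
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 32, 32))
#     for t in state.timesteps:
#         model_output = sample  # placeholder for a UNet noise prediction
#         sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)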