| code (string, 82 to 53.2k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 484
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, input_modal):
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
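# A hedged sketch of how the pieces above are typically wired together; the
# checkpoint name and data path are placeholders, not from the original script.
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)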
| 352
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
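# Usage sketch (assuming the Transformers tools runtime, in which PipelineTool
# instances are callable; the input text is illustrative):
#
#   summarizer = TextSummarizationTool()
#   summary = summarizer("Several paragraphs of meeting notes go here ...")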
| 580
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
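# For context, this packaged builder backs calls of the following shape (a
# sketch; the pickle file name is a placeholder):
#
#   from datasets import load_dataset
#   dset = load_dataset("pandas", data_files="frames.pkl", split="train")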
| 580
| 1
|
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of ``input_string`` into a zigzag grid of height
    ``key`` and reads the grid back row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Builds the zigzag template used by ``encrypt``, fills it with the
    ciphertext row by row, then reads the characters back in zigzag order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses ``decrypt`` with every possible key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
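# A minimal round trip with the three functions above (the key 4 and the sample
# string are illustrative, not from the original tests):
example_cipher = encrypt("Hello World", 4)  # "HWe olordll"
assert decrypt(example_cipher, 4) == "Hello World"
assert bruteforce(example_cipher)[4] == "Hello World"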
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
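# A small worked example for distribute_coins (tree shape illustrative): a root
# holding 3 coins with two empty leaves must push one coin down each edge, so
# the answer is 2 moves.
example_root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_root) == 2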
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
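# A brief usage sketch of the decorator defined above (the function and the
# cache size of 100 are illustrative, not from the original module):
@LRUCache.decorator(100)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)


# fib(30) returns 832040; fib.cache_info() then reports the hit/miss counters
# of the LRUCache instance backing the decorated function.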
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts ``numbers`` in place by comparing each element with every later
    element and swapping out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
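# Two quick checks (values illustrative): the nested loops compare each element
# with every later one, so the sort costs O(n^2) comparisons.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 9, 0]) == [-1, 0, 9]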
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 336
| 0
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 603
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 603
| 1
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
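# Two quick sanity checks (values illustrative); thanks to ``lru_cache``,
# repeated calls reuse previously computed results.
assert factorial(5) == 120
assert factorial(0) == 1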
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
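# Example invocation (a sketch; the script and output-path names are
# placeholders):
#
#   python convert_swin_to_upernet.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub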
| 282
| 0
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
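# A short usage sketch (array values illustrative): ``prefix(i)`` sums the
# first ``i`` elements and ``query(left, right)`` sums the half-open range
# [left, right).
f = FenwickTree([1, 2, 3, 4, 5])
assert f.prefix(3) == 1 + 2 + 3
assert f.query(1, 4) == 2 + 3 + 4
f.add(0, 10)  # the underlying array becomes [11, 2, 3, 4, 5]
assert f.prefix(2) == 11 + 2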
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339
|
import numpy as np
a_ = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class lowercase__ :
def __init__( self )-> None:
'''simple docstring'''
lowerCAmelCase__ = np.array(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = np.where(letter == self.SQUARE )
lowerCAmelCase__ = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.SQUARE[indexa - 1, indexa - 1]
return letter
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = message.lower()
lowerCAmelCase__ = message.replace(" " , "" )
lowerCAmelCase__ = message.replace("j" , "i" )
lowerCAmelCase__ = np.empty((2, len(__UpperCAmelCase )) )
for letter_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase__ = numbers[0]
lowerCAmelCase__ = numbers[1]
lowerCAmelCase__ = first_step.reshape(2 * len(__UpperCAmelCase ) )
lowerCAmelCase__ = ""
for numbers_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = int(second_step[numbers_index * 2] )
lowerCAmelCase__ = int(second_step[(numbers_index * 2) + 1] )
lowerCAmelCase__ = self.numbers_to_letter(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = encoded_message + letter
return encoded_message
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = message.lower()
lowerCAmelCase__ = message.replace(" " , "" )  # fixed: str.replace returns a new string, so the result must be assigned
lowerCAmelCase__ = np.empty(2 * len(__UpperCAmelCase ) )
for letter_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase__ = numbers[0]
lowerCAmelCase__ = numbers[1]
lowerCAmelCase__ = first_step.reshape((2, len(__UpperCAmelCase )) )
lowerCAmelCase__ = ""
for numbers_index in range(len(__UpperCAmelCase ) ):
lowerCAmelCase__ = int(second_step[0, numbers_index] )
lowerCAmelCase__ = int(second_step[1, numbers_index] )
lowerCAmelCase__ = self.numbers_to_letter(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = decoded_message + letter
return decoded_message
| 339
| 1
|
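# --- Editor's note: a compact, runnable sketch of the Polybius-square cipher
# --- above, using descriptive names invented for illustration. Letters map to
# --- 1-based (row, col) coordinates and 'j' is folded into 'i'; encoding
# --- flattens all row indices before all column indices, then re-pairs them.
import numpy as np

SQUARE = np.array(
    [["a", "b", "c", "d", "e"],
     ["f", "g", "h", "i", "k"],
     ["l", "m", "n", "o", "p"],
     ["q", "r", "s", "t", "u"],
     ["v", "w", "x", "y", "z"]]
)

def letter_to_numbers(letter: str) -> tuple[int, int]:
    row, col = np.where(SQUARE == letter)
    return int(row[0]) + 1, int(col[0]) + 1  # 1-based coordinates

def numbers_to_letter(row: int, col: int) -> str:
    return str(SQUARE[row - 1, col - 1])

def encode(message: str) -> str:
    message = message.lower().replace(" ", "").replace("j", "i")
    coords = np.array([letter_to_numbers(c) for c in message]).T  # shape (2, n)
    flat = coords.reshape(2 * len(message))  # all rows first, then all columns
    return "".join(
        numbers_to_letter(int(flat[2 * i]), int(flat[2 * i + 1]))
        for i in range(len(message))
    )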
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
lowercase_ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
lowercase_ : Tuple = "segformer.encoder." + key
if key.startswith("backbone" ):
lowercase_ : Union[str, Any] = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowercase_ : Dict = key[key.find("patch_embed" ) + len("patch_embed" )]
lowercase_ : List[Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(__snake_case )-1}""" )
if "norm" in key:
lowercase_ : int = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowercase_ : List[str] = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
lowercase_ : Union[str, Any] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(__snake_case )-1}""" )
if "layer_norm1" in key:
lowercase_ : Dict = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
lowercase_ : str = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
lowercase_ : Tuple = key[key.find("block" ) + len("block" )]
lowercase_ : str = key.replace(F"""block{idx}""" , F"""block.{int(__snake_case )-1}""" )
if "attn.q" in key:
lowercase_ : Union[str, Any] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
lowercase_ : Optional[int] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
lowercase_ : Tuple = key.replace("attn" , "attention.self" )
if "fc1" in key:
lowercase_ : Optional[Any] = key.replace("fc1" , "dense1" )
if "fc2" in key:
lowercase_ : List[str] = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
lowercase_ : Dict = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
lowercase_ : int = key.replace("linear_fuse.conv" , "linear_fuse" )
lowercase_ : Any = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowercase_ : Dict = key[key.find("linear_c" ) + len("linear_c" )]
lowercase_ : Dict = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(__snake_case )-1}""" )
if key.startswith("head" ):
lowercase_ : Tuple = key.replace("head" , "classifier" )
lowercase_ : Dict = value
return new_state_dict
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowercase_ : Optional[int] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowercase_ : int = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowercase_ : List[str] = kv_weight[
: config.hidden_sizes[i], :
]
lowercase_ : List[Any] = kv_bias[: config.hidden_sizes[i]]
lowercase_ : List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
lowercase_ : Tuple = kv_bias[
config.hidden_sizes[i] :
]
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ : List[Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return image
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : int = SegformerConfig()
lowercase_ : Any = False
# set attributes based on model_name
lowercase_ : int = "huggingface/label-files"
if "segformer" in model_name:
lowercase_ : Optional[int] = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
lowercase_ : Dict = 150
lowercase_ : Dict = "ade20k-id2label.json"
lowercase_ : Dict = (1, 150, 128, 128)
elif "city" in model_name:
lowercase_ : int = 19
lowercase_ : List[str] = "cityscapes-id2label.json"
lowercase_ : Optional[int] = (1, 19, 128, 128)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
lowercase_ : Any = True
lowercase_ : List[Any] = model_name[4:6]
lowercase_ : Tuple = 1000
lowercase_ : int = "imagenet-1k-id2label.json"
lowercase_ : List[str] = (1, 1000)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
lowercase_ : int = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
lowercase_ : Optional[int] = {int(__snake_case ): v for k, v in idalabel.items()}
lowercase_ : Dict = idalabel
lowercase_ : List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowercase_ : Tuple = [64, 128, 320, 512]
lowercase_ : int = 256
elif size == "b2":
lowercase_ : Union[str, Any] = [64, 128, 320, 512]
lowercase_ : Dict = 768
lowercase_ : List[Any] = [3, 4, 6, 3]
elif size == "b3":
lowercase_ : Dict = [64, 128, 320, 512]
lowercase_ : int = 768
lowercase_ : Optional[int] = [3, 4, 18, 3]
elif size == "b4":
lowercase_ : Optional[int] = [64, 128, 320, 512]
lowercase_ : List[Any] = 768
lowercase_ : Optional[int] = [3, 8, 27, 3]
elif size == "b5":
lowercase_ : Any = [64, 128, 320, 512]
lowercase_ : Tuple = 768
lowercase_ : Optional[int] = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
lowercase_ : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
# prepare image
lowercase_ : List[Any] = prepare_img()
lowercase_ : Tuple = image_processor(images=__snake_case , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
lowercase_ : Dict = torch.load(__snake_case , map_location=torch.device("cpu" ) )
else:
lowercase_ : Optional[Any] = torch.load(__snake_case , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
lowercase_ : str = rename_keys(__snake_case , encoder_only=__snake_case )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__snake_case , __snake_case )
# create HuggingFace model and load state dict
if encoder_only:
lowercase_ : List[str] = False
lowercase_ : Dict = SegformerForImageClassification(__snake_case )
else:
lowercase_ : Any = SegformerForSemanticSegmentation(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# forward pass
lowercase_ : str = model(__snake_case )
lowercase_ : Dict = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowercase_ : str = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowercase_ : str = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowercase_ : Tuple = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowercase_ : Optional[int] = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowercase_ : Any = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowercase_ : List[str] = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowercase_ : List[str] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowercase_ : Optional[Any] = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowercase_ : List[str] = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
lowercase_ : Dict = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 705
|
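# --- Editor's note: the conversion script above splits each fused key/value
# --- projection from the original SegFormer checkpoint into separate HF "key"
# --- and "value" tensors. A minimal sketch of that split, assuming the fused
# --- matrix stacks K rows on top of V rows (names are illustrative):
import torch

def split_fused_kv(kv_weight: torch.Tensor, kv_bias: torch.Tensor, hidden_size: int):
    # rows [0, hidden_size) are the key projection, the remainder the value projection
    k_weight, v_weight = kv_weight[:hidden_size, :], kv_weight[hidden_size:, :]
    k_bias, v_bias = kv_bias[:hidden_size], kv_bias[hidden_size:]
    return (k_weight, k_bias), (v_weight, v_bias)

# Usage sketch: (k_w, k_b), (v_w, v_b) = split_fused_kv(kv_w, kv_b, config.hidden_sizes[i])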
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 0
|
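# --- Editor's note: the tool tests above cast raw values through a type
# --- registry (AGENT_TYPE_MAPPING) before invoking a tool. A self-contained
# --- sketch of that pattern with stand-in wrapper classes invented here:
class TextPayload(str):
    """Stand-in for AgentText."""

class AudioPayload(list):
    """Stand-in for AgentAudio (a list of samples, for illustration only)."""

TYPE_MAPPING = {"text": TextPayload, "audio": AudioPayload}

def wrap_inputs(values, input_types):
    # cast each raw value into its declared wrapper type, as the tests do
    return [TYPE_MAPPING[t](v) for v, t in zip(values, input_types)]

# wrap_inputs(["hi", [0.0, 0.1]], ["text", "audio"]) -> [TextPayload('hi'), AudioPayload([0.0, 0.1])]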
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__snake_case = 1
@register_to_config
def __init__( self: str , a: str=2000 , a: List[str]=0.1 , a: Any=20 , a: Dict=1e-3 ):
__lowerCamelCase : Dict = None
__lowerCamelCase : Any = None
__lowerCamelCase : Optional[int] = None
def _snake_case ( self: int , a: str , a: Union[str, torch.device] = None ):
__lowerCamelCase : int = torch.linspace(1 , self.config.sampling_eps , a , device=a )
def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Tuple , a: Optional[Any] , a: Dict=None ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowerCamelCase : Tuple = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowerCamelCase : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowerCamelCase : Optional[Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowerCamelCase : List[str] = std.unsqueeze(-1 )
__lowerCamelCase : Any = -score / std
# compute
__lowerCamelCase : List[Any] = -1.0 / len(self.timesteps )
__lowerCamelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowerCamelCase : Dict = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowerCamelCase : int = beta_t.unsqueeze(-1 )
__lowerCamelCase : Any = -0.5 * beta_t * x
__lowerCamelCase : List[Any] = torch.sqrt(a )
__lowerCamelCase : Tuple = drift - diffusion**2 * score
__lowerCamelCase : str = x + drift * dt
# add noise
__lowerCamelCase : Any = randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype )
__lowerCamelCase : Any = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self: Optional[int] ):
return self.config.num_train_timesteps
| 669
|
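# --- Editor's note: a numpy sketch of the single Euler-Maruyama step the
# --- scheduler above applies to the reverse-time SDE. `score` stands in for
# --- the model output; beta_min/beta_max/num_steps are illustrative values,
# --- not necessarily the scheduler's defaults.
import math
import numpy as np

def reverse_sde_step(x, score, t, beta_min=0.1, beta_max=20.0, num_steps=2000, rng=None):
    rng = rng or np.random.default_rng(0)
    dt = -1.0 / num_steps                    # reverse time runs from 1 toward 0
    beta_t = beta_min + t * (beta_max - beta_min)
    drift = -0.5 * beta_t * x                # forward drift f(x, t)
    diffusion = math.sqrt(beta_t)
    drift = drift - diffusion**2 * score     # reverse drift: f - g^2 * score
    x_mean = x + drift * dt
    noise = rng.standard_normal(x.shape)
    x = x_mean + diffusion * math.sqrt(-dt) * noise  # -dt > 0, so sqrt is real
    return x, x_mean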
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """trocr"""
__snake_case = ["""past_key_values"""]
__snake_case = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self: Dict , a: List[str]=5_0265 , a: Optional[Any]=1024 , a: Tuple=12 , a: Dict=16 , a: Optional[Any]=4096 , a: Optional[Any]="gelu" , a: Optional[int]=512 , a: int=0.1 , a: str=0.0 , a: Union[str, Any]=0.0 , a: Any=2 , a: Optional[int]=0.0_2 , a: Optional[Any]=0.0 , a: List[Any]=True , a: Any=False , a: int=True , a: Optional[Any]=True , a: Tuple=1 , a: Union[str, Any]=0 , a: Any=2 , **a: List[Any] , ):
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : Union[str, Any] = d_model
__lowerCamelCase : List[str] = decoder_layers
__lowerCamelCase : Optional[Any] = decoder_attention_heads
__lowerCamelCase : List[str] = decoder_ffn_dim
__lowerCamelCase : Optional[int] = activation_function
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : Dict = dropout
__lowerCamelCase : int = attention_dropout
__lowerCamelCase : List[str] = activation_dropout
__lowerCamelCase : Union[str, Any] = init_std
__lowerCamelCase : Tuple = decoder_layerdrop
__lowerCamelCase : str = use_cache
__lowerCamelCase : List[Any] = scale_embedding
__lowerCamelCase : Any = use_learned_position_embeddings
__lowerCamelCase : List[Any] = layernorm_embedding
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , )
| 669
| 1
|
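# --- Editor's note: configs like TrOCR's alias generic names ("hidden_size")
# --- onto model-specific fields ("d_model") via an attribute_map. A minimal
# --- sketch of that aliasing mechanism, reduced to plain Python:
class AliasedConfig:
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "decoder_attention_heads",
    }

    def __init__(self, d_model=1024, decoder_attention_heads=16):
        self.d_model = d_model
        self.decoder_attention_heads = decoder_attention_heads

    def __getattr__(self, name):
        # only invoked when normal attribute lookup fails, so real fields win
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

# AliasedConfig().hidden_size -> 1024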
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = VideoToVideoSDPipeline
SCREAMING_SNAKE_CASE__ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
SCREAMING_SNAKE_CASE__ :Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
SCREAMING_SNAKE_CASE__ :Any = PipelineTesterMixin.required_optional_params - {"latents"}
SCREAMING_SNAKE_CASE__ :Any = False
# No `output_type`.
SCREAMING_SNAKE_CASE__ :Any = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCamelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_UpperCamelCase : str = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
_UpperCamelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
_UpperCamelCase : Any = CLIPTextModel(__a )
_UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCamelCase : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Optional[int] , __a : List[Any]=0 ) -> int:
# 3 frames
_UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("mps" ):
_UpperCamelCase : Optional[Any] = torch.manual_seed(__a )
else:
_UpperCamelCase : Any = torch.Generator(device=__a ).manual_seed(__a )
_UpperCamelCase : int = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
_UpperCamelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : List[str] = self.get_dummy_components()
_UpperCamelCase : List[str] = VideoToVideoSDPipeline(**__a )
_UpperCamelCase : Dict = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Tuple = self.get_dummy_inputs(__a )
_UpperCamelCase : str = "np"
_UpperCamelCase : Optional[Any] = sd_pipe(**__a ).frames
_UpperCamelCase : List[Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_UpperCamelCase : Optional[Any] = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : List[Any] = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCamelCase : Optional[int] = torch.randn((1, 10, 3, 1024, 576) , generator=__a )
_UpperCamelCase : Dict = video.to("cuda" )
_UpperCamelCase : int = "Spiderman is surfing"
_UpperCamelCase : List[Any] = pipe(__a , video=__a , generator=__a , num_inference_steps=3 , output_type="pt" ).frames
_UpperCamelCase : List[Any] = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 51
|
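# --- Editor's note: a sketch of the seeded-generator pattern the pipeline
# --- tests above rely on for determinism: a device-bound torch.Generator with
# --- a fixed seed makes sampled latents, and hence output slices, reproducible.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support torch.Generator(device=...), so seed globally there
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

# latents = torch.randn((1, 4, 32, 32), generator=make_generator("cpu"))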
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 1
|
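# --- Editor's note: a brute-force cross-check of the lamina count above,
# --- written by this editor for validation on small limits. A hollow square
# --- lamina with outer side o and hole side h (same parity, h >= 1) uses
# --- o*o - h*h tiles; we count tile totals realised by 1 to 10 laminae.
from collections import defaultdict

def solution_naive(t_limit: int = 1_000) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit // 4 + 2):  # 4*o - 4 tiles is the minimum for side o
        for hole in range(outer - 2, 0, -2):  # hole keeps the same parity as outer
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tiles only grow as the hole shrinks
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)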
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase_ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def snake_case ( A__ ,A__ ,A__ ,A__=None ):
# Initialise PyTorch model
UpperCAmelCase_ : List[Any] = XLNetConfig.from_json_file(A__ )
UpperCAmelCase_ : str = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
UpperCAmelCase_ : List[Any] = finetuning_task
UpperCAmelCase_ : Tuple = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCAmelCase_ : List[Any] = XLNetForSequenceClassification(A__ )
elif "squad" in finetuning_task:
UpperCAmelCase_ : List[str] = finetuning_task
UpperCAmelCase_ : int = XLNetForQuestionAnswering(A__ )
else:
UpperCAmelCase_ : Union[str, Any] = XLNetLMHeadModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(A__ ,A__ ,A__ )
# Save pytorch-model
UpperCAmelCase_ : Dict = os.path.join(A__ ,A__ )
UpperCAmelCase_ : int = os.path.join(A__ ,A__ )
print(F"""Save PyTorch model to {os.path.abspath(A__ )}""" )
torch.save(model.state_dict() ,A__ )
print(F"""Save configuration file to {os.path.abspath(A__ )}""" )
with open(A__ ,"w" ,encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase_ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 95
|
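# --- Editor's note: the XLNet conversion above ends with the standard save
# --- pattern: weights via torch.save, architecture as a JSON config. A
# --- minimal sketch (file names are illustrative):
import os
import torch

def save_converted(model, config_json: str, dump_dir: str) -> None:
    os.makedirs(dump_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(dump_dir, "pytorch_model.bin"))
    with open(os.path.join(dump_dir, "config.json"), "w", encoding="utf-8") as f:
        f.write(config_json)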
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def a__ ( SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
lowerCAmelCase : Any = {str(SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda SCREAMING_SNAKE_CASE : str_to_choice.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( *,
SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : Any = dataclasses.MISSING , SCREAMING_SNAKE_CASE : Callable[[], Any] = dataclasses.MISSING , SCREAMING_SNAKE_CASE : dict = None , **SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCAmelCase : Tuple = {}
if aliases is not None:
lowerCAmelCase : Union[str, Any] = aliases
if help is not None:
lowerCAmelCase : Optional[Any] = help
return dataclasses.field(metadata=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , default_factory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Iterable[DataClassType]
def __init__( self , snake_case__ , **snake_case__ ):
"""simple docstring"""
if "formatter_class" not in kwargs:
lowerCAmelCase : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**snake_case__ )
if dataclasses.is_dataclass(snake_case__ ):
lowerCAmelCase : List[Any] = [dataclass_types]
lowerCAmelCase : List[str] = list(snake_case__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(snake_case__ )
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = f"""--{field.name}"""
lowerCAmelCase : Optional[Any] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , snake_case__ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
lowerCAmelCase : List[str] = kwargs.pop("aliases" , [] )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : int = [aliases]
lowerCAmelCase : int = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(snake_case__ , "UnionType" ) and isinstance(snake_case__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(snake_case__ ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f""" Problem encountered in field '{field.name}'.""" )
if type(snake_case__ ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase : str = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase : Optional[int] = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase : List[Any] = (
field.type.__args__[0] if isinstance(snake_case__ , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase : List[Any] = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase : List[Any] = {}
if origin_type is Literal or (isinstance(field.type , snake_case__ ) and issubclass(field.type , snake_case__ )):
if origin_type is Literal:
lowerCAmelCase : str = field.type.__args__
else:
lowerCAmelCase : List[str] = [x.value for x in field.type]
lowerCAmelCase : List[Any] = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase : int = field.default
else:
lowerCAmelCase : List[str] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase : Dict = copy(snake_case__ )
# Hack because type=bool in argparse does not behave as we want.
lowerCAmelCase : str = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase : int = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase : Any = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase : List[str] = "?"
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase : Union[str, Any] = True
elif isclass(snake_case__ ) and issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = field.type.__args__[0]
lowerCAmelCase : List[str] = "+"
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase : Union[str, Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase : int = True
else:
lowerCAmelCase : Optional[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase : Optional[int] = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase : Union[str, Any] = field.default_factory()
else:
lowerCAmelCase : List[str] = True
parser.add_argument(snake_case__ , *snake_case__ , **snake_case__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase : Any = False
parser.add_argument(f"""--no_{field.name}""" , action="store_false" , dest=field.name , **snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if hasattr(snake_case__ , "_argument_group_name" ):
lowerCAmelCase : Optional[int] = self.add_argument_group(dtype._argument_group_name )
else:
lowerCAmelCase : Any = self
try:
lowerCAmelCase : Dict[str, type] = get_type_hints(snake_case__ )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(snake_case__ ):
lowerCAmelCase : Optional[int] = ".".join(map(snake_case__ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(snake_case__ ):
if not field.init:
continue
lowerCAmelCase : Any = type_hints[field.name]
self._parse_dataclass_field(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__=None , snake_case__=False , snake_case__=True , snake_case__=None , snake_case__=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCAmelCase : Dict = []
if args_filename:
args_files.append(Path(snake_case__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCAmelCase : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(snake_case__ , type=snake_case__ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCAmelCase , lowerCAmelCase : List[Any] = args_file_parser.parse_known_args(args=snake_case__ )
lowerCAmelCase : Optional[int] = vars(snake_case__ ).get(args_file_flag.lstrip("-" ) , snake_case__ )
if cmd_args_file_paths:
args_files.extend([Path(snake_case__ ) for p in cmd_args_file_paths] )
lowerCAmelCase : Optional[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCAmelCase : List[str] = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.parse_known_args(args=snake_case__ )
lowerCAmelCase : List[Any] = []
for dtype in self.dataclass_types:
lowerCAmelCase : Union[str, Any] = {f.name for f in dataclasses.fields(snake_case__ ) if f.init}
lowerCAmelCase : List[str] = {k: v for k, v in vars(snake_case__ ).items() if k in keys}
for k in keys:
delattr(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = dtype(**snake_case__ )
outputs.append(snake_case__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(snake_case__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def lowercase__ ( self , snake_case__ , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = set(args.keys() )
lowerCAmelCase : Optional[Any] = []
for dtype in self.dataclass_types:
lowerCAmelCase : Optional[Any] = {f.name for f in dataclasses.fields(snake_case__ ) if f.init}
lowerCAmelCase : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCAmelCase : Tuple = dtype(**snake_case__ )
outputs.append(snake_case__ )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(snake_case__ )}""" )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = False ):
"""simple docstring"""
with open(Path(snake_case__ ) , encoding="utf-8" ) as open_json_file:
lowerCAmelCase : Dict = json.loads(open_json_file.read() )
lowerCAmelCase : Union[str, Any] = self.parse_dict(snake_case__ , allow_extra_keys=snake_case__ )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.parse_dict(yaml.safe_load(Path(snake_case__ ).read_text() ) , allow_extra_keys=snake_case__ )
return tuple(snake_case__ )
| 645
| 0
|
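# --- Editor's note: a minimal sketch of the core HfArgumentParser idea above:
# --- turn dataclass fields into argparse arguments, then rebuild the dataclass
# --- from the parsed namespace. The real parser's edge cases (Optional, bool
# --- flags, Literal choices, args files) are deliberately omitted here.
import dataclasses
from argparse import ArgumentParser

def parse_into_dataclass(cls, args=None):
    parser = ArgumentParser()
    for field in dataclasses.fields(cls):
        required = field.default is dataclasses.MISSING
        default = None if required else field.default
        parser.add_argument(f"--{field.name}", type=field.type, default=default, required=required)
    namespace = parser.parse_args(args)
    return cls(**vars(namespace))

@dataclasses.dataclass
class TrainArgs:
    lr: float = 1e-3
    epochs: int = 3

# parse_into_dataclass(TrainArgs, ["--lr", "0.01"]) -> TrainArgs(lr=0.01, epochs=3)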
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : List[Any] = """speech_to_text"""
snake_case__ : List[str] = ["""past_key_values"""]
snake_case__ : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , __lowerCamelCase : Tuple=10_000 , __lowerCamelCase : int=12 , __lowerCamelCase : Optional[int]=2_048 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : List[Any]=2_048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : str=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : Tuple=256 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Optional[Any]=6_000 , __lowerCamelCase : Any=1_024 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[int]=(5, 5) , __lowerCamelCase : Optional[Any]=1_024 , __lowerCamelCase : Any=80 , __lowerCamelCase : str=1 , **__lowerCamelCase : Dict , ):
UpperCamelCase :List[str] = vocab_size
UpperCamelCase :Union[str, Any] = d_model
UpperCamelCase :Any = encoder_ffn_dim
UpperCamelCase :Tuple = encoder_layers
UpperCamelCase :int = encoder_attention_heads
UpperCamelCase :Tuple = decoder_ffn_dim
UpperCamelCase :str = decoder_layers
UpperCamelCase :Union[str, Any] = decoder_attention_heads
UpperCamelCase :int = dropout
UpperCamelCase :List[str] = attention_dropout
UpperCamelCase :List[str] = activation_dropout
UpperCamelCase :Tuple = activation_function
UpperCamelCase :str = init_std
UpperCamelCase :Optional[int] = encoder_layerdrop
UpperCamelCase :List[str] = decoder_layerdrop
UpperCamelCase :Any = use_cache
UpperCamelCase :str = encoder_layers
UpperCamelCase :Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase :Any = max_source_positions
UpperCamelCase :int = max_target_positions
UpperCamelCase :Tuple = num_conv_layers
UpperCamelCase :List[Any] = list(__lowerCamelCase )
UpperCamelCase :int = conv_channels
UpperCamelCase :Optional[Any] = input_feat_per_channel
UpperCamelCase :List[Any] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , **__lowerCamelCase , )
| 590
|
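# --- Editor's note: the one piece of logic in the Speech2Text config above is
# --- the early consistency check between conv_kernel_sizes and
# --- num_conv_layers. A standalone sketch of that validate-at-construction
# --- pattern:
def validate_conv_config(conv_kernel_sizes, num_conv_layers: int) -> None:
    if len(conv_kernel_sizes) != num_conv_layers:
        raise ValueError(
            f"`len(conv_kernel_sizes) = {len(conv_kernel_sizes)}` must equal "
            f"`num_conv_layers = {num_conv_layers}`."
        )

# validate_conv_config((5, 5), 2)  # passes; a mismatch raises before any model is built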
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : Dict = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : Optional[Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _SCREAMING_SNAKE_CASE :
snake_case__ : int
snake_case__ : Node | None
class _SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowerCamelCase : Iterable[int] ):
UpperCamelCase :Node | None = None
for i in sorted(__lowerCamelCase , reverse=__lowerCamelCase ):
UpperCamelCase :List[Any] = Node(__lowerCamelCase , self.head )
def __iter__( self : int ):
UpperCamelCase :List[str] = self.head
while node:
yield node.data
UpperCamelCase :Tuple = node.next_node
def __len__( self : Tuple ):
return sum(1 for _ in self )
def __str__( self : List[Any] ):
return " -> ".join([str(__lowerCamelCase ) for node in self] )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : SortedLinkedList , __magic_name__ : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__magic_name__ ) + list(__magic_name__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 590
| 1
|
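# --- Editor's note: the merge above rebuilds a SortedLinkedList from the
# --- concatenation of both inputs, re-sorting in O((m+n) log(m+n)). A sketch
# --- of the linear-time alternative for already-sorted inputs:
from heapq import merge

def merge_sorted(a, b):
    # both iterables must already be individually sorted; runs in O(m + n)
    return list(merge(a, b))

# merge_sorted([-11, 0, 1, 3], [-2, 0, 2, 4]) -> [-11, -2, 0, 0, 1, 2, 3, 4]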
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Tuple = b.T
UpperCamelCase__ : Tuple = np.sum(np.square(snake_case_) , axis=1)
UpperCamelCase__ : Optional[Any] = np.sum(np.square(snake_case_) , axis=0)
UpperCamelCase__ : Dict = np.matmul(snake_case_ , snake_case_)
UpperCamelCase__ : Optional[Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : Optional[Any] = x.reshape(-1 , 3)
UpperCamelCase__ : Optional[int] = squared_euclidean_distance(snake_case_ , snake_case_)
return np.argmin(snake_case_ , axis=1)
class __lowercase (UpperCamelCase_ ):
_lowerCamelCase = ['''pixel_values''']
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any = None , UpperCAmelCase_ : int = True , UpperCAmelCase_ : Any = None , UpperCAmelCase_ : Dict = PILImageResampling.BILINEAR , UpperCAmelCase_ : Tuple = True , UpperCAmelCase_ : Dict = True , **UpperCAmelCase_ : int , ):
super().__init__(**__UpperCamelCase)
UpperCamelCase__ : Optional[int] = size if size is not None else {'''height''': 256, '''width''': 256}
UpperCamelCase__ : Dict = get_size_dict(__UpperCamelCase)
UpperCamelCase__ : List[str] = np.array(__UpperCamelCase) if clusters is not None else None
UpperCamelCase__ : Dict = do_resize
UpperCamelCase__ : str = size
UpperCamelCase__ : int = resample
UpperCamelCase__ : List[Any] = do_normalize
UpperCamelCase__ : Tuple = do_color_quantize
def __UpperCamelCase ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] = PILImageResampling.BILINEAR , UpperCAmelCase_ : Dict = None , **UpperCAmelCase_ : int , ):
UpperCamelCase__ : str = get_size_dict(__UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}')
return resize(
__UpperCamelCase , size=(size['height'], size['width']) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str = None , ):
UpperCamelCase__ : Tuple = rescale(image=__UpperCamelCase , scale=1 / 1_27.5 , data_format=__UpperCamelCase)
UpperCamelCase__ : int = image - 1
return image
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : List[Any] = None , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Any = None , UpperCAmelCase_ : Any = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : List[str] = None , UpperCAmelCase_ : Union[str, Any] = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
UpperCamelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : Dict = size if size is not None else self.size
UpperCamelCase__ : int = get_size_dict(__UpperCamelCase)
UpperCamelCase__ : Tuple = resample if resample is not None else self.resample
UpperCamelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase__ : Tuple = clusters if clusters is not None else self.clusters
UpperCamelCase__ : str = np.array(__UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = make_list_of_images(__UpperCamelCase)
if not valid_images(__UpperCamelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.')
# All transformations expect numpy arrays.
UpperCamelCase__ : Any = [to_numpy_array(__UpperCamelCase) for image in images]
if do_resize:
UpperCamelCase__ : Optional[int] = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase) for image in images]
if do_normalize:
UpperCamelCase__ : Any = [self.normalize(image=__UpperCamelCase) for image in images]
if do_color_quantize:
UpperCamelCase__ : List[Any] = [to_channel_dimension_format(__UpperCamelCase , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase__ : Optional[int] = np.array(__UpperCamelCase)
UpperCamelCase__ : Any = color_quantize(__UpperCamelCase , __UpperCamelCase).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
UpperCamelCase__ : str = images.shape[0]
UpperCamelCase__ : List[str] = images.reshape(__UpperCamelCase , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase__ : List[str] = list(__UpperCamelCase)
else:
UpperCamelCase__ : Union[str, Any] = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase) for image in images]
UpperCamelCase__ : Dict = {'''input_ids''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
| 596
|
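# --- Editor's note: the image processor above quantizes pixels to the nearest
# --- palette cluster via squared Euclidean distances. A numpy sketch of that
# --- core computation:
import numpy as np

def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2, result shape (len(a), len(b))
    b_t = b.T
    a_sq = np.sum(np.square(a), axis=1)
    b_sq = np.sum(np.square(b_t), axis=0)
    ab = np.matmul(a, b_t)
    return a_sq[:, None] - 2 * ab + b_sq[None, :]

def color_quantize(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (..., 3) RGB values; clusters: (k, 3) palette rows
    flat = pixels.reshape(-1, 3)
    return np.argmin(squared_euclidean_distance(flat, clusters), axis=1)

# color_quantize(np.zeros((2, 2, 3)), np.array([[0., 0., 0.], [1., 1., 1.]])) -> array of zeros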
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class a ( UpperCamelCase_ ,unittest.TestCase ):
__lowercase = VideoToVideoSDPipeline
__lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
__lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
__lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowercase = False
# No `output_type`.
__lowercase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
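
# Added commentary (not part of the original test file): the fast test above compares
# a 3x3 corner slice of the first generated frame against hard-coded reference values,
# and the slow test compares the last five values of the first frame. Both use loose
# 1e-2 tolerances because diffusion outputs are only reproducible up to minor
# numerical noise across devices and kernels.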
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
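
# The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98;
# their product reduces to 1/100, so solution() below prints 100.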
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)
    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
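
# Added commentary: with fn=operator.add the expected answers in the inline comments
# above hold because after arr.update(1, 5) the array is [2, 5, 5, 3, 4], so the
# queried sums are 3+4=7, 5, and 5+5+3=13. Building the tree is O(n); each update and
# query_range call is O(log n), since only one root-to-leaf path (or two range
# boundaries) is visited per operation.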
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLAP feature extractor that prepares log-mel spectrograms (with an
    optional "fusion" of several views of long clips) for the CLAP model.
    """

    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        """Serializes this instance to a dict, dropping the (large) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
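
    # Added commentary: the stacked fusion tensor has shape (4, chunk_frames, 64) --
    # one globally downsampled view of the whole spectrogram plus three random crops
    # (front / middle / back). This is what the "fusion" truncation mode feeds CLAP,
    # so long clips keep both global context and local detail.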
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
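
# Illustrative usage sketch (assumed values; the default extractor expects 48 kHz mono
# audio, and clips longer than max_length_s=10 take the "fusion" path):
#
#   import numpy as np
#   feature_extractor = ClapFeatureExtractor()
#   audio = np.random.randn(48_000 * 12)  # 12 seconds of fake audio
#   features = feature_extractor(audio, sampling_rate=48_000, return_tensors="pt")
#   # features["input_features"]: (1, 4, chunk_frames, 64) in fusion mode
#   # features["is_longer"]: [[True]] because the clip exceeds 10 seconds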
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=2_00, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
def upper(word: str) -> str:
    """
    Convert an entire string to ASCII uppercase letters.

    >>> upper("wow")
    'WOW'
    >>> upper("wh[]32")
    'WH[]32'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
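
# Illustrative usage sketch: the defaults above mirror a Swinv2-tiny setup, so the
# derived channel dimension after the last stage is embed_dim * 2 ** (4 - 1) = 768.
#
#   config = Swinv2Config()
#   print(config.hidden_size)  # 768
#   print(config.num_layers)   # 4, i.e. len(depths)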
"""Tokenization classes for GPTNeoXJapanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """This tokenizer is based on a Japanese special sub-word encoding that handles byte fallback, emoji and character fluctuations."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuations unique to Japanese, hence the larger raw_vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds input ids for a conversation, keeping only the last model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Converts text into subword tokens. Ported from
    https://github.com/tanreinama/Japanese-BPEEncoder_V2 (MIT license).
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
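
# Illustrative usage sketch (the checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP
# above; the sample sentence is arbitrary):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   print(tokenizer.decode(ids))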
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False,
        )
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-processes a single flax key/tensor pair to match the PyTorch naming."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if index == number_of_items:
return 0
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Any = 0
UpperCamelCase : int = knapsack(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
UpperCamelCase : Union[str, Any] = values[index] + knapsack(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
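
# Example (classic instance): values [60, 100, 120] with weights [10, 20, 30] under a
# budget of 50 -- the optimum takes the last two items, for a total value of 220:
#
#   knapsack([10, 20, 30], [60, 100, 120], 3, 50, 0)  # -> 220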
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
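
# Example invocation (the script name and paths are placeholders, not verified against
# any real checkpoint):
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output_dir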
| 124
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
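

# A minimal usage sketch; the checkpoint id below is an assumption for illustration
# (any hub repo containing a compatible unet + scheduler would do):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
#   images[0].save("ddim_sample.png")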
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
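
# With the lazy structure above, importing the package stays cheap: torch-backed
# classes are only materialized on first attribute access, e.g.
# `from transformers import TimesformerModel` resolves through _LazyModule on demand.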
| 257
| 0
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }

    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 422
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[str] = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
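
# Example invocation (the script name and paths are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/output_dir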
| 149
| 0
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
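
# For reference, the layout asserted above: a single sequence is wrapped as
# [CLS] tokens [SEP], and a pair as [CLS] tokens_a [SEP] tokens_b [SEP].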
| 721
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
def __snake_case ( self : List[str] , *a__ : Optional[Any] , **a__ : str ):
UpperCAmelCase = self.model_type.from_pretrained(*a__ , **a__ )
def __snake_case ( self : int ):
UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCAmelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __snake_case ( self : int ):
UpperCAmelCase = self.model
UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
UpperCAmelCase = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
UpperCAmelCase = Adafactor(
a__ , lr=self.hparams.learning_rate , scale_parameter=a__ , relative_step=a__ )
else:
UpperCAmelCase = AdamW(
a__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCAmelCase = optimizer
UpperCAmelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __snake_case ( self : Union[str, Any] , a__ : Union[str, Any] , a__ : Optional[int] ):
return self.validation_step(a__ , a__ )
def __snake_case ( self : int , a__ : Any ):
return self.validation_end(a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __snake_case ( self : Dict , a__ : Tuple ):
if stage == "test":
UpperCAmelCase = len(self.test_dataloader().dataset )
else:
UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=a__ )
UpperCAmelCase = len(self.train_dataloader().dataset )
def __snake_case ( self : Any , a__ : str , a__ : int , a__ : bool = False ):
raise NotImplementedError('''You must implement this for your task''' )
def __snake_case ( self : Tuple ):
return self.train_loader
def __snake_case ( self : Any ):
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=a__ )
def __snake_case ( self : Tuple ):
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=a__ )
def __snake_case ( self : Tuple , a__ : Optional[Any] ):
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
a__ , list(filter(a__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models")
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name")
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co")
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config")
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler")
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether all the model parameters received a gradient after backward
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ))
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
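

# A minimal usage sketch (names are illustrative; `MyTaskModule` is a hypothetical
# BaseTransformer subclass that implements get_dataloader):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args(["--model_name_or_path", "t5-small", "--do_train"])
#   model = MyTaskModule(args)
#   trainer = generic_train(model, args)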
| 570
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = 'upernet'

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
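

# A minimal usage sketch (values are illustrative, not a recommended segmentation setup):
#
#   config = UperNetConfig(backbone_config={"model_type": "swin"})
#   print(config.backbone_config.model_type)  # "swin"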
| 604
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowercase :
def __init__( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Dict=10 , __lowerCamelCase : Optional[int]=[8, 16, 32, 64] , __lowerCamelCase : Tuple=[1, 1, 2, 1] , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int="relu" , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Dict=None , __lowerCamelCase : int=["stage2", "stage3", "stage4"] , __lowerCamelCase : Optional[Any]=[2, 3, 4] , __lowerCamelCase : Union[str, Any]=1 , ) -> Any:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = embeddings_size
lowercase = hidden_sizes
lowercase = depths
lowercase = is_training
lowercase = use_labels
lowercase = hidden_act
lowercase = num_labels
lowercase = scope
lowercase = len(__lowerCamelCase )
lowercase = out_features
lowercase = out_indices
lowercase = num_groups
def __a ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def __a ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __a ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ) -> List[str]:
'''simple docstring'''
lowercase = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase = None
lowercase = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowercase = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self : Dict ) -> int:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase ,lowercase ,lowercase = config_and_inputs
lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( _A , _A , unittest.TestCase ):
lowercase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def __a ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase = BitModelTester(self )
lowercase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def __a ( self : List[Any] ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : int ) -> str:
'''simple docstring'''
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __a ( self : str ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __a ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __a ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def __a ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase ,lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCamelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __a ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __a ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def __a ( self : str ) -> Dict:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def __a ( self : Dict ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(__lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ):
lowercase = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase ,lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase = layer_type
lowercase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __a ( self : Any ) -> Dict:
'''simple docstring'''
pass
def __a ( self : Tuple ) -> int:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def __a ( self : Any ) -> Optional[int]:
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def __a ( self : Tuple ) -> List[str]:
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __a ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowercase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase = model(**__lowerCamelCase )
# verify the logits
lowercase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
lowercase = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
@require_torch
class __lowercase ( _A , unittest.TestCase ):
lowercase = (BitBackbone,) if is_torch_available() else ()
lowercase = BitConfig
lowercase = False
def __a ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase = BitModelTester(self )
| 604
| 1
|
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (e.g. "A", "AB") to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZZ")
    702
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
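

# A sketch of the inverse mapping (an addition for illustration, not part of the
# original file): column number back to an Excel-style title. Excel titles are a
# bijective base-26 numeration, hence the `- 1` before each divmod.
def column_to_excel_title(column_number: int) -> str:
    title = ""
    while column_number > 0:
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title  # e.g. column_to_excel_title(28) == "AB"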
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            # horizontal flip: mirror the x center of each bounding box
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            # vertical flip: mirror the y center of each bounding box
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """
    Generate a random lowercase alphanumeric string, e.g. '7b7ad245cdff75241935e4dd860f3bad'.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 654
| 0
|
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
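

# Quick check of count_divisors via prime factorization (illustrative):
# 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.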
if __name__ == "__main__":
print(solution())
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
__lowercase : Union[str, Any] = dct.pop(lowerCAmelCase_ )
__lowercase : Union[str, Any] = val
def snake_case_ ( ):
__lowercase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : List[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ):
__lowercase : Tuple = DeiTConfig()
# all deit models have fine-tuned heads
__lowercase : Dict = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__lowercase : Any = 1000
__lowercase : str = """huggingface/label-files"""
__lowercase : Union[str, Any] = """imagenet-1k-id2label.json"""
__lowercase : Any = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Dict = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : int = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
__lowercase : Union[str, Any] = int(deit_name[-6:-4] )
__lowercase : Any = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
__lowercase : Tuple = 192
__lowercase : Optional[int] = 768
__lowercase : str = 12
__lowercase : Tuple = 3
elif deit_name[9:].startswith("""small""" ):
__lowercase : List[str] = 384
__lowercase : Tuple = 1536
__lowercase : List[str] = 12
__lowercase : Optional[Any] = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
__lowercase : List[Any] = 1024
__lowercase : Optional[Any] = 4096
__lowercase : Optional[Any] = 24
__lowercase : Optional[Any] = 16
# load original model from timm
__lowercase : int = timm.create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowercase : Dict = timm_model.state_dict()
__lowercase : List[str] = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
__lowercase : int = DeiTForImageClassificationWithTeacher(lowerCAmelCase_ ).eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
__lowercase : Tuple = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__lowercase : int = DeiTImageProcessor(size=lowerCAmelCase_ , crop_size=config.image_size )
__lowercase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowercase : List[Any] = encoding["""pixel_values"""]
__lowercase : Any = model(lowerCAmelCase_ )
__lowercase : Optional[int] = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1e-3 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
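For reference, a hypothetical command-line invocation of this converter (the script filename and output path are illustrative; the model name is the parser's own default):

# python convert_deit_timm_to_pytorch.py \
#     --deit_name vit_deit_base_distilled_patch16_224 \
#     --pytorch_dump_folder_path ./deit-base-distilled-patch16-224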
def and_gate(input_1: int, input_2: int) -> int:
    # AND is 1 only when neither input is 0, i.e. when the pair contains no zeros.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
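The tuple-count trick generalizes to the other basic gates. As a sketch (not part of the original file), an OR gate only needs the count of zeros to be below two:

def or_gate(input_1: int, input_2: int) -> int:
    # OR is 0 only when both inputs are 0, i.e. when the pair contains two zeros.
    return int((input_1, input_2).count(0) < 2)


assert or_gate(0, 0) == 0 and or_gate(0, 1) == or_gate(1, 0) == or_gate(1, 1) == 1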
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
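A minimal usage sketch of the config class (the hyperparameter values and path below are illustrative assumptions, not from the source):

# config = XLMRobertaConfig(vocab_size=250002, hidden_size=768, num_hidden_layers=12)
# round trip through disk, as all PretrainedConfig subclasses support:
# config.save_pretrained("./xlmr-config")
# config = XLMRobertaConfig.from_pretrained("./xlmr-config")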
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepares a list of PIL images (3 channels, height 30, width 400)
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : Any=True , __lowerCamelCase : str=True , __lowerCamelCase : int=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Optional[Any]=99 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : int=5 , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : int=512 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Dict="last" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=0 , ):
UpperCamelCase :Optional[int] = parent
UpperCamelCase :Optional[Any] = batch_size
UpperCamelCase :List[Any] = seq_length
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :Dict = use_input_lengths
UpperCamelCase :Optional[Any] = use_token_type_ids
UpperCamelCase :Dict = use_labels
UpperCamelCase :Tuple = gelu_activation
UpperCamelCase :Union[str, Any] = sinusoidal_embeddings
UpperCamelCase :Optional[int] = causal
UpperCamelCase :Optional[Any] = asm
UpperCamelCase :str = n_langs
UpperCamelCase :List[str] = vocab_size
UpperCamelCase :Union[str, Any] = n_special
UpperCamelCase :int = hidden_size
UpperCamelCase :Any = num_hidden_layers
UpperCamelCase :int = num_attention_heads
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :Dict = attention_probs_dropout_prob
UpperCamelCase :Tuple = max_position_embeddings
UpperCamelCase :str = type_sequence_label_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :Optional[int] = num_choices
UpperCamelCase :List[str] = summary_type
UpperCamelCase :Tuple = use_proj
UpperCamelCase :Any = scope
UpperCamelCase :Optional[int] = bos_token_id
def _A ( self : Any ):
UpperCamelCase :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase :Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase :Union[str, Any] = None
if self.use_token_type_ids:
UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase :Optional[Any] = None
UpperCamelCase :int = None
UpperCamelCase :Optional[int] = None
if self.use_labels:
UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase :Any = ids_tensor([self.batch_size] , 2 ).float()
UpperCamelCase :Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase :Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A ( self : Union[str, Any] ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , ):
UpperCamelCase :Tuple = XLMModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Tuple = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase )
UpperCamelCase :Optional[Any] = model(__lowerCamelCase , langs=__lowerCamelCase )
UpperCamelCase :int = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , ):
UpperCamelCase :List[Any] = XLMWithLMHeadModel(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Optional[int] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , ):
UpperCamelCase :Optional[int] = XLMForQuestionAnsweringSimple(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[str] = model(__lowerCamelCase )
UpperCamelCase :Any = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase )
UpperCamelCase :int = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ):
UpperCamelCase :List[str] = XLMForQuestionAnswering(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , ):
UpperCamelCase :str = XLMForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[Any] = model(__lowerCamelCase )
UpperCamelCase :Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , ):
UpperCamelCase :Any = self.num_labels
UpperCamelCase :int = XLMForTokenClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Optional[int] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self : str , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : List[Any] , ):
UpperCamelCase :List[Any] = self.num_choices
UpperCamelCase :str = XLMForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase :int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase :Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase :str = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case__ : Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
snake_case__ : List[str] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=False ):
UpperCamelCase :Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCamelCase :Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
UpperCamelCase :Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
def _A ( self : int , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=1 ):
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(
[isinstance(__lowerCamelCase , __lowerCamelCase ) for iter_attentions in attentions] , [True] * len(__lowerCamelCase ) )
self.assertEqual(len(__lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__lowerCamelCase ):
# adds PAD dummy token
UpperCamelCase :List[Any] = min_length + idx + 1
UpperCamelCase :Any = min_length + idx + 1
UpperCamelCase :Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__lowerCamelCase ) )
def _A ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=1 ):
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(
[isinstance(__lowerCamelCase , __lowerCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(__lowerCamelCase ) , )
self.assertEqual(len(__lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__lowerCamelCase ):
# adds PAD dummy token
UpperCamelCase :List[str] = min_length + idx + 1
UpperCamelCase :List[str] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__lowerCamelCase ) , )
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
def __init__( self : Dict , _A : Optional[Any] , _A : Dict=13 , _A : int=7 , _A : List[str]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : List[Any]=99 , _A : Any=32 , _A : Union[str, Any]=2 , _A : Optional[int]=4 , _A : int=37 , _A : Any="gelu" , _A : int=0.1 , _A : Dict=0.1 , _A : Any=512 , _A : List[Any]=16 , _A : Tuple=2 , _A : List[Any]=0.02 , _A : List[Any]=False , _A : int=True , _A : Union[str, Any]="None" , _A : Optional[Any]=3 , _A : Dict=4 , _A : Any=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : List[str] , _A : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : str , _A : str , _A : Optional[Any] , _A : Dict ):
_UpperCamelCase = TFDebertaVaModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[Any] , _A : int , _A : List[str] , _A : str , _A : Tuple , _A : List[Any] , _A : Dict , _A : Union[str, Any] ):
_UpperCamelCase = TFDebertaVaForMaskedLM(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : int , _A : str , _A : str , _A : Union[str, Any] , _A : int , _A : Optional[Any] , _A : str , _A : Dict ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : List[Any] , _A : List[str] , _A : Tuple , _A : int , _A : int , _A : Tuple , _A : Tuple , _A : int ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , _A : Any , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Tuple , _A : Any , _A : Union[str, Any] ):
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 10
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
lowercase__ : int = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
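A worked trace may help (illustrative, not part of the original file):

# patience_sort([5, 1, 4, 2, 3]) builds the piles [5, 1], [4, 2] and [3]:
# each element lands on the leftmost pile whose top is >= the element
# (found via bisect_left over the pile tops), or starts a new pile.
# merge() then interleaves the reversed piles into [1, 2, 3, 4, 5].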
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ) -> Dict:
A : Optional[Any] = 4
A : List[str] = 32
A : Any = (32, 32)
A : str = torch.manual_seed(0 )
A : int = torch.device(__lowerCamelCase )
A : List[str] = (batch_size, num_channels) + sizes
A : Dict = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase )
A : int = {"hidden_states": hidden_states}
if include_temb:
A : Any = 1_28
A : List[str] = randn_tensor((batch_size, temb_channels) , generator=__lowerCamelCase , device=__lowerCamelCase )
if include_res_hidden_states_tuple:
A : str = torch.manual_seed(1 )
A : Tuple = (randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=__lowerCamelCase ),)
if include_encoder_hidden_states:
A : Dict = floats_tensor((batch_size, 32, 32) ).to(__lowerCamelCase )
if include_skip_sample:
A : Optional[int] = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCamelCase , device=__lowerCamelCase )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : Dict = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
A : Dict = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A : str = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : List[Any] = self.block_class(**__lowerCamelCase )
unet_block.to(__lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A : int = unet_block(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Union[str, Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
A : Any = output[0, -1, -3:, -3:]
A : Union[str, Any] = torch.tensor(__lowerCamelCase ).to(__lowerCamelCase )
assert torch_all_close(output_slice.flatten() , __lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
A , A : Tuple = self.prepare_init_args_and_inputs_for_common()
A : str = self.block_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A : Optional[int] = model(**__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A : Optional[Any] = output[0]
A : List[str] = torch.device(__lowerCamelCase )
A : List[str] = randn_tensor(output.shape , device=__lowerCamelCase )
A : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
loss.backward()
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # pads variable-length sequences to `sequence_length` on the given side;
    # a tuple padding_value means each position holds a pair (used for spans)
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
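To make the padding helper concrete, a small illustrative behaviour check (not from the original file):

# padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]
# With tuple padding (as used for original_entity_spans), each position holds a pair:
# padding_tensor([[(0, 1)]], (-1, -1), "right", 2)
#   -> [[[0, 1], [-1, -1]]]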
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
from math import isqrt


def is_prime(number: int) -> bool:
    """Checks whether a number is prime by trial division up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Counts the primes below max_prime that are differences of consecutive cubes,
    i.e. of the form (n + 1)**3 - n**3 = 3*n*n + 3*n + 1.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
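The decorator imported from utils evidently returns the wall-clock duration of each call, which is what gets stored into the times dict. A minimal sketch of such a decorator, assuming only that behaviour (the real helper in the benchmarks' utils module may differ):

import time
from functools import wraps


def get_duration_sketch(func):
    # Hypothetical stand-in for the benchmarks' get_duration helper.
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds become the recorded benchmark value

    return wrapper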
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __a ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
lowerCAmelCase_ = self.diffusers_dir
shutil.copy(
os.path.join(UpperCAmelCase , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ):
'''simple docstring'''
lowerCAmelCase_ = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase_ = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase_ = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
lowerCAmelCase_ = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(UpperCAmelCase , '''w''' , newline='''\n''' ) as f:
f.write(UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase )
with open(UpperCAmelCase , '''r''' ) as f:
self.assertTrue(f.read() , UpperCAmelCase )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self ):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , UpperCAmelCase ) , )
# Copy consistency with a really long name
lowerCAmelCase_ = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , UpperCAmelCase , UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , UpperCAmelCase , overwrite_result=re.sub('''DDPM''' , '''Test''' , UpperCAmelCase ) , )
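
# For context: the marker these tests exercise is an inline comment placed
# directly above a copied class in the diffusers source tree, e.g.
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#     class TestSchedulerOutput(nn.Module):
#         ...
#
# `check_copies.is_copy_consistent` re-derives the body from the referenced
# class and flags (or, with overwrite=True, rewrites) any drift.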
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the X-MOD checkpoint's weights to our RoBERTa-based structure."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate (fairseq's fc1 is the up-projection)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output (fairseq's fc2 is the down-projection)
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--xmod_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
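
# Usage sketch (the script filename and paths are illustrative placeholders):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#         --pytorch_dump_folder_path ./xmod-base
#
# The script assumes `sentencepiece.bpe.model` sits next to the fairseq
# checkpoint and `dict.txt` lives under `data_bin/`, as wired up above.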
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
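
# With the `_LazyModule` indirection above, `import transformers` stays cheap:
# a plain `from transformers import MobileBertModel` only triggers the import
# of `modeling_mobilebert` (and therefore torch) when the attribute is first
# accessed, rather than at package-import time.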
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info_with_expected_config(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
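
# Quick interactive sketch of the APIs exercised above (network access and the
# listed hub datasets are assumed to be available):
#
#     >>> from datasets import get_dataset_config_names, get_dataset_split_names
#     >>> get_dataset_config_names("squad")
#     ['plain_text']
#     >>> get_dataset_split_names("squad", config_name="plain_text")
#     ['train', 'validation']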
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """
    Circular FIFO queue with a fixed capacity, backed by a circular doubly
    linked list of pre-allocated nodes.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
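
if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original module):
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"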
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up one-to-one, so copy by position
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
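
# Usage sketch (the script filename is an illustrative placeholder):
#
#     python convert_levit_checkpoint.py --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub
#
# Omitting --model_name converts every entry in `names_to_config` in turn.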
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
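
# After conversion, the checkpoint can be exercised like any other VideoMAE
# model; a hedged sketch (the local path is a placeholder for the dump folder):
#
#     from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification
#
#     processor = VideoMAEImageProcessor.from_pretrained("/path/to/dump")
#     model = VideoMAEForVideoClassification.from_pretrained("/path/to/dump")
#     inputs = processor(prepare_video(), return_tensors="pt")
#     predicted = model(**inputs).logits.argmax(-1)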
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
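
# The hard-coded ids above encode "The dog is cute and lives in the garden
# house"; the same tensor should be reproducible with the tokenizer (sketch,
# assuming the sentencepiece weights can be fetched from the hub):
#
#     from transformers import XLMRobertaTokenizer
#
#     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     input_ids = tok("The dog is cute and lives in the garden house", return_tensors="pt").input_ids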
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the inputs received and prepares masked indices for self-supervised
    pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed :class:`~transformers.Trainer` that decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
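
# Launch sketch (dataset/model ids are placeholders; see TrainingArguments for
# the full set of flags):
#
#     python run_wav2vec2_pretrain.py \
#         --model_name_or_path /path/to/wav2vec2-base-config \
#         --dataset_name librispeech_asr --dataset_config_name clean \
#         --output_dir ./wav2vec2-pretrained --do_train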
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
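
# For intuition: top-k keeps the k highest-scoring logits, top-p keeps the
# smallest set of logits whose cumulative probability exceeds p, and every
# other position is set to -inf. A rough NumPy sketch of the top-k half
# (illustrative only, not the transformers implementation):
#
#     import numpy as np
#
#     def top_k_filter(logits, k):
#         kth_best = np.sort(logits)[-k]
#         return np.where(logits < kth_best, -np.inf, logits)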
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
def _lowercase ( self : Dict ):
# TF-only test: tf.saved_model export
snake_case__ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case__ : Any = 1
snake_case__ : Optional[int] = 2
class SCREAMING_SNAKE_CASE__ ( tf.Module ):
"""simple docstring"""
def __init__( self : Any , __A : str ):
super(__A , self ).__init__()
snake_case__ : Any = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=__A , )
def _lowercase ( self : int , __A : Tuple , __A : str ):
snake_case__ : int = self.model.generate(
input_ids=__A , attention_mask=__A , max_new_tokens=__A , return_dict_in_generate=__A , )
return {"sequences": outputs["sequences"]}
snake_case__ : Tuple = [[2], [1_0_2, 1_0_3]]
snake_case__ : Optional[Any] = [[1], [1, 1]]
snake_case__ : Tuple = DummyModel(model=__A )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__A , __A , signatures={"serving_default": dummy_model.serving} )
snake_case__ : Union[str, Any] = tf.saved_model.load(__A ).signatures["serving_default"]
for input_row in range(len(__A ) ):
snake_case__ : Dict = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case__ : int = serving_func(**__A )["sequences"]
snake_case__ : Union[str, Any] = test_model.generate(**__A , max_new_tokens=__A )
tf.debugging.assert_equal(__A , __A )
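# Note on the two exports above: the first TensorSpec leaves the batch axis
# dynamic ((None, input_length)) while the second leaves the sequence axis
# dynamic ((batch_size, None)), so one serving function accepts variable batch
# sizes and the other variable sequence lengths.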
@slow
@require_tensorflow_text
def _lowercase ( self : int ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=__A )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int ):
super().__init__()
snake_case__ : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__A , "spiece.model" ) , "rb" ).read() )
snake_case__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def _lowercase ( self : Union[str, Any] , __A : Any , *__A : Dict , **__A : str ):
snake_case__ : Dict = self.tokenizer.tokenize(__A )
snake_case__, snake_case__ : List[Any] = text.pad_model_inputs(
__A , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
snake_case__ : str = self.model.generate(input_ids=__A , attention_mask=__A )
return self.tokenizer.detokenize(__A )
snake_case__ : Any = CompleteSentenceTransformer()
snake_case__ : str = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
snake_case__ : str = complete_model(__A )
snake_case__ : Optional[int] = tf.keras.Model(__A , __A )
keras_model.save(__A )
def _lowercase ( self : Tuple ):
# Has PT equivalent: this test relies on random sampling
snake_case__ : Any = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 1_0,
"temperature": 0.7,
}
snake_case__ : Union[str, Any] = 1_4
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case__ : Tuple = "Hello, my dog is cute and"
snake_case__ : str = tokenizer(__A , return_tensors="tf" )
snake_case__ : str = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
snake_case__ : Any = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
snake_case__ : int = model.generate(**__A , eos_token_id=__A , **__A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case__ : Optional[int] = [6_3_8, 1_9_8]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
snake_case__ : Union[str, Any] = model.generate(**__A , eos_token_id=__A , **__A )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _lowercase ( self : str ):
# Has PT equivalent: ample use of framework-specific code
snake_case__ : Any = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
snake_case__ : Optional[int] = "Hugging Face is a technology company based in New York and Paris."
snake_case__ : List[str] = bart_tokenizer(__A , return_tensors="tf" ).input_ids
snake_case__ : Any = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
snake_case__ : str = bart_model.generate(__A ).numpy()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : Tuple , __A : str , __A : Optional[Any]=None , **__A : Optional[int] ):
return super().call(__A , **__A )
snake_case__ : Union[str, Any] = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
snake_case__ : Optional[Any] = bart_model.generate(__A , foo="bar" ).numpy()
self.assertTrue(np.array_equal(__A , __A ) )
class SCREAMING_SNAKE_CASE__ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def _lowercase ( self : int , __A : Union[str, Any] , **__A : Optional[int] ):
return super().call(__A , **__A )
snake_case__ : str = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case__ : List[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case__ : str = bart_model.generate(__A ).numpy()
with self.assertRaises(__A ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__A , foo="bar" )
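# Illustrative sketch of the signature-based kwarg filtering the comment above
# refers to (hypothetical helper, not the actual transformers internals): once
# a callable accepts **kwargs nothing can be safely filtered out, so unexpected
# keys like "foo" propagate downstream and eventually raise.
import inspect
def usable_kwargs(fn, kwargs):
    params = inspect.signature(fn).parameters.values()
    if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params):
        return dict(kwargs)  # **kwargs present: pass everything through
    names = {p.name for p in params}
    return {k: v for k, v in kwargs.items() if k in names}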
| 25
|
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ):
snake_case__ : Optional[int] = pos_x
snake_case__ : Dict = pos_y
snake_case__ : int = (pos_y, pos_x)
snake_case__ : Optional[int] = goal_x
snake_case__ : Tuple = goal_y
snake_case__ : str = parent
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ):
snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A )
snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A )
snake_case__ : int = [self.start]
snake_case__ : Union[str, Any] = False
def _lowercase ( self : Dict ):
while self.node_queue:
snake_case__ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : Optional[Any] = True
return self.retrace_path(__A )
snake_case__ : int = self.get_successors(__A )
for node in successors:
self.node_queue.append(__A )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Union[str, Any] , __A : Node ):
snake_case__ : str = []
for action in delta:
snake_case__ : str = parent.pos_x + action[1]
snake_case__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) )
return successors
def _lowercase ( self : Optional[Any] , __A : Node | None ):
snake_case__ : Tuple = node
snake_case__ : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : Tuple = current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : str , __A : int ):
snake_case__ : str = BreadthFirstSearch(__A , __A )
snake_case__ : int = BreadthFirstSearch(__A , __A )
snake_case__ : Tuple = False
def _lowercase ( self : Optional[Any] ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 )
snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ : List[str] = True
return self.retrace_bidirectional_path(
__A , __A )
snake_case__ : Union[str, Any] = current_bwd_node
snake_case__ : Dict = current_fwd_node
snake_case__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__A ),
self.bwd_bfs: self.bwd_bfs.get_successors(__A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Any , __A : Node , __A : Node ):
snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A )
snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A )
bwd_path.pop()
bwd_path.reverse()
snake_case__ : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__: int = logging.get_logger(__name__)
lowerCAmelCase__: Dict = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : str = 'mctct'
def __init__( self , __lowerCAmelCase=8_065 , __lowerCAmelCase=1_536 , __lowerCAmelCase=36 , __lowerCAmelCase=6_144 , __lowerCAmelCase=4 , __lowerCAmelCase=384 , __lowerCAmelCase=920 , __lowerCAmelCase=1e-5 , __lowerCAmelCase=0.3 , __lowerCAmelCase="relu" , __lowerCAmelCase=0.02 , __lowerCAmelCase=0.3 , __lowerCAmelCase=0.3 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=0.3 , __lowerCAmelCase=1 , __lowerCAmelCase=(7,) , __lowerCAmelCase=(3,) , __lowerCAmelCase=80 , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase="sum" , __lowerCAmelCase=False , **__lowerCAmelCase , ):
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = intermediate_size
SCREAMING_SNAKE_CASE_ : Any = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = attention_head_dim
SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Any = layerdrop
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE_ : Tuple = eos_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = conv_glu_dim
SCREAMING_SNAKE_CASE_ : Union[str, Any] = conv_dropout
SCREAMING_SNAKE_CASE_ : int = num_conv_layers
SCREAMING_SNAKE_CASE_ : Dict = input_feat_per_channel
SCREAMING_SNAKE_CASE_ : Any = input_channels
SCREAMING_SNAKE_CASE_ : Optional[Any] = conv_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ : str = ctc_zero_infinity
# prevents the config test from failing when exporting to JSON
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = list(__lowerCAmelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
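# Illustrative usage of the configuration above (MCTCTConfig in the upstream
# transformers package, shown here with obfuscated names); all arguments fall
# back to the defaults in the signature, and mismatched kernel/layer counts
# trigger the ValueError above:
#     config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))
#     assert len(config.conv_kernel) == config.num_conv_layers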
| 345
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowerCAmelCase__: Dict = logging.get_logger(__name__)
lowerCAmelCase__: Dict = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class snake_case_ :
def __init__( self , __lowerCAmelCase=None , **__lowerCAmelCase ):
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('model_save_dir' , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.get('latest_model_name' , __lowerCAmelCase )
def __call__( self , **__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = {k: np.array(__lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(__lowerCAmelCase , __lowerCAmelCase )
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ):
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
SCREAMING_SNAKE_CASE_ : Tuple = 'CPUExecutionProvider'
return ort.InferenceSession(__lowerCAmelCase , providers=[provider] , sess_options=__lowerCAmelCase )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_save_dir.joinpath(self.latest_model_name )
SCREAMING_SNAKE_CASE_ : int = Path(__lowerCAmelCase ).joinpath(__lowerCAmelCase )
try:
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
SCREAMING_SNAKE_CASE_ : str = self.model_save_dir.joinpath(__lowerCAmelCase )
if src_path.exists():
SCREAMING_SNAKE_CASE_ : List[str] = Path(__lowerCAmelCase ).joinpath(__lowerCAmelCase )
try:
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
except shutil.SameFileError:
pass
def __A ( self , __lowerCAmelCase , **__lowerCAmelCase , ):
if os.path.isfile(__lowerCAmelCase ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
# saving model weights/files
self._save_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def __A ( cls , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , provider=__lowerCAmelCase , sess_options=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = Path(__lowerCAmelCase )
# load model from hub
else:
# download model
SCREAMING_SNAKE_CASE_ : int = hf_hub_download(
repo_id=__lowerCAmelCase , filename=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , revision=__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ : str = Path(__lowerCAmelCase ).parent
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(__lowerCAmelCase ).name
SCREAMING_SNAKE_CASE_ : Tuple = OnnxRuntimeModel.load_model(__lowerCAmelCase , provider=__lowerCAmelCase , sess_options=__lowerCAmelCase )
return cls(model=__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def __A ( cls , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Tuple = None
if len(str(__lowerCAmelCase ).split('@' ) ) == 2:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = model_id.split('@' )
return cls._from_pretrained(
model_id=__lowerCAmelCase , revision=__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , **__lowerCAmelCase , )
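# Illustrative usage of the wrapper above (repo id is a placeholder); per the
# upstream diffusers OnnxRuntimeModel, __call__ converts every kwarg to a NumPy
# array before running the ONNX Runtime session:
#     model = OnnxRuntimeModel.from_pretrained("some/onnx-repo", provider="CPUExecutionProvider")
#     outputs = model(input_ids=np.zeros((1, 8), dtype=np.int64))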
| 345
| 1
|
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
UpperCAmelCase_ = '''\
Mean Squared Error (MSE) is the average of the squared differences between the
predicted and actual values.
'''
UpperCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines how multiple output values are aggregated. An array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def a__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def a__ ( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="uniform_average" , lowerCAmelCase__=True ):
_A= mean_squared_error(
lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__ )
return {"mse": mse}
| 476
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
UpperCAmelCase_ = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
UpperCAmelCase_ = model.state_dict()
UpperCAmelCase_ = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
UpperCAmelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
UpperCAmelCase_ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
UpperCAmelCase_ = state_dict['''cls.predictions.decoder.weight''']
UpperCAmelCase_ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ = state_dict[F"cls.predictions.transform.dense.{w}"]
UpperCAmelCase_ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
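# The teacher layers [0, 2, 4, 7, 9, 11] yield a 6-layer student, the usual
# DistilBERT depth. A purely illustrative (hypothetical) way to reuse the dump,
# assuming the transferred keys match the student's naming:
#     student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
#     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)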
| 476
| 1
|
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __magic_name__ ( unittest.TestCase ):
UpperCamelCase_ = inspect.getfile(accelerate.test_utils )
UpperCamelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
UpperCamelCase_ = ['''accelerate''', '''launch''']
UpperCamelCase_ = Path.home() / '''.cache/huggingface/accelerate'''
UpperCamelCase_ = '''default_config.yaml'''
UpperCamelCase_ = config_folder / config_file
UpperCamelCase_ = config_folder / '''_default_config.yaml'''
UpperCamelCase_ = Path('''tests/test_configs''' )
@classmethod
def lowercase_ ( cls ) -> List[Any]:
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase_ ( cls ) -> Optional[Any]:
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: Optional[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase_ ( self ) -> Any:
"""simple docstring"""
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=A_ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(A_ ), self.test_file_path] , env=os.environ.copy() )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __magic_name__ ( unittest.TestCase ):
UpperCamelCase_ = '''test-tpu'''
UpperCamelCase_ = '''us-central1-a'''
UpperCamelCase_ = '''ls'''
UpperCamelCase_ = ['''accelerate''', '''tpu-config''']
UpperCamelCase_ = '''cd /usr/share'''
UpperCamelCase_ = '''tests/test_samples/test_command_file.sh'''
UpperCamelCase_ = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase: Dict = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , A_ , )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , A_ , )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: Tuple = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=A_ )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , A_ , )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , A_ , )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , A_ , )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: int = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , A_ , )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , A_ , )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , A_ , )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
_lowercase: int = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=A_ , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , A_ , )
| 353
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_lowercase: typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_UpperCamelCase , max_perimeter + 1 ):
_lowercase: str = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_UpperCamelCase ):
_lowercase: str = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCAmelCase ( _UpperCamelCase = 1_000 ):
"""simple docstring"""
_lowercase: int = pythagorean_triple(_UpperCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 353
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__A : Optional[Any] = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__A : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
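# Illustrative effect of the _LazyModule pattern above: importing the package is
# cheap, and the heavy torch-backed submodule is loaded only on first attribute
# access, e.g.
#     from transformers.models import gpt_neox_japanese as m
#     m.GPTNeoXJapaneseTokenizer  # the real import happens here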
| 703
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__A : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__A : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCamelCase_ ( ):
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_00_00 , globals=globals() , ) )
benchmark()
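# Worked example: both implementations compute
#     euclidean_distance([1, 2, 3], [4, 5, 6]) = sqrt(3**2 + 3**2 + 3**2) = sqrt(27) ≈ 5.196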
| 450
| 0
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SCREAMING_SNAKE_CASE = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE = logging.getLogger()
def _lowerCamelCase ( ) -> Dict:
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCAmelCase : int = parser.parse_args()
return args.f
def _lowerCamelCase ( __A : str , __A : int="eval" ) -> Union[str, Any]:
_UpperCAmelCase : List[str] = os.path.join(UpperCAmelCase_ , f'''{split}_results.json''' )
if os.path.exists(UpperCAmelCase_ ):
with open(UpperCAmelCase_ , '''r''' ) as f:
return json.load(UpperCAmelCase_ )
raise ValueError(f'''can\'t find {path}''' )
SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A_ ( __lowercase ):
'''simple docstring'''
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Union[str, Any] = f'''\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_flax_glue.main()
_UpperCAmelCase : List[Any] = get_results(__A)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
@slow
def snake_case__ ( self) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[Any] = f'''\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_clm_flax.main()
_UpperCAmelCase : Tuple = get_results(__A)
self.assertLess(result['''eval_perplexity'''] , 100)
@slow
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : str = f'''\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_summarization_flax.main()
_UpperCAmelCase : str = get_results(__A , split='''test''')
self.assertGreaterEqual(result['''test_rouge1'''] , 10)
self.assertGreaterEqual(result['''test_rouge2'''] , 2)
self.assertGreaterEqual(result['''test_rougeL'''] , 7)
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7)
@slow
def snake_case__ ( self) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Any = f'''\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_mlm_flax.main()
_UpperCAmelCase : List[str] = get_results(__A)
self.assertLess(result['''eval_perplexity'''] , 42)
@slow
def snake_case__ ( self) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Optional[Any] = f'''\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_ta_mlm_flax.main()
_UpperCAmelCase : List[str] = get_results(__A)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42)
@slow
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = 7 if get_gpu_count() > 1 else 2
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : int = f'''\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_flax_ner.main()
_UpperCAmelCase : str = get_results(__A)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
self.assertGreaterEqual(result['''eval_f1'''] , 0.3)
@slow
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : Union[str, Any] = f'''\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '''.split()
with patch.object(__A , '''argv''' , __A):
run_qa.main()
_UpperCAmelCase : Union[str, Any] = get_results(__A)
self.assertGreaterEqual(result['''eval_f1'''] , 30)
self.assertGreaterEqual(result['''eval_exact'''] , 30)
| 485
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : list[int | float] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int | float:
if len(UpperCAmelCase_ ) == 0:
raise ValueError('''find_max() arg is an empty sequence''' )
if (
left >= len(UpperCAmelCase_ )
or left < -len(UpperCAmelCase_ )
or right >= len(UpperCAmelCase_ )
or right < -len(UpperCAmelCase_ )
):
raise IndexError('''list index out of range''' )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE_ : Optional[int] =(left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE_ : Dict =find_max(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE_ : Dict =find_max(UpperCAmelCase_ , mid + 1 , UpperCAmelCase_ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
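# Worked example of the divide-and-conquer recursion:
#     find_max([1, 5, 3, 7], 0, 3)
# splits at mid = 1 into max(nums[0:2]) = 5 and max(nums[2:4]) = 7 and returns 7.
# Every element is visited exactly once, so the running time is O(n).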
| 443
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = 0 ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = row, column
_lowerCAmelCase : Any = [[default_value for c in range(__A )] for r in range(__A )]
def __str__( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = F'Matrix consists of {self.row} rows and {self.column} columns\n'
# Make string identifier
_lowerCAmelCase : List[Any] = 0
for row_vector in self.array:
for obj in row_vector:
_lowerCAmelCase : Any = max(__A , len(str(__A ) ) )
_lowerCAmelCase : Union[str, Any] = F'%{max_element_length}s'
# Make string and return
def single_line(snake_case__ ) -> str:
nonlocal string_format_identifier
_lowerCAmelCase : List[Any] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__A ) for row_vector in self.array )
return s
def __repr__( self ):
'''simple docstring'''
return str(self )
def a ( self , snake_case__ ):
'''simple docstring'''
if not (isinstance(__A , (list, tuple) ) and len(__A ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , snake_case__ ):
'''simple docstring'''
assert self.validate_indicies(__A )
return self.array[loc[0]][loc[1]]
def __setitem__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
assert self.validate_indicies(__A )
_lowerCAmelCase : int = value
def __add__( self , snake_case__ ):
'''simple docstring'''
assert isinstance(__A , __A )
assert self.row == another.row and self.column == another.column
# Add
_lowerCAmelCase : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_lowerCAmelCase : Tuple = self[r, c] + another[r, c]
return result
def __neg__( self ):
'''simple docstring'''
_lowerCAmelCase : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_lowerCAmelCase : Any = -self[r, c]
return result
def __sub__( self , snake_case__ ):
'''simple docstring'''
return self + (-another)
def __mul__( self , snake_case__ ):
'''simple docstring'''
if isinstance(__A , (int, float) ): # Scalar multiplication
_lowerCAmelCase : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_lowerCAmelCase : Union[str, Any] = self[r, c] * another
return result
elif isinstance(__A , __A ): # Matrix multiplication
assert self.column == another.row
_lowerCAmelCase : str = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_lowerCAmelCase : str = F'Unsupported type given for another ({type(__A )})'
raise TypeError(__A )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_lowerCAmelCase : Union[str, Any] = self[r, c]
return result
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
assert isinstance(__A , __A ) and isinstance(__A , __A )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Apply the Sherman-Morrison update
_lowerCAmelCase : Optional[int] = v.transpose()
_lowerCAmelCase : List[Any] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
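# Sherman-Morrison: treating this matrix as A^(-1), the method returns
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# i.e. self - ((self * u) * (v_t * self)) / numerator_factor above.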
# Testing
if __name__ == "__main__":
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Any = Matrix(3 , 3 , 0 )
for i in range(3 ):
_lowerCAmelCase : Dict = 1
print(f'a^(-1) is {ainv}' )
# u, v
_lowerCAmelCase : List[str] = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = 1, 2, -3
_lowerCAmelCase : Tuple = Matrix(3 , 1 , 0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = 4, -2, 5
print(f'u is {u}' )
print(f'v is {v}' )
print(f'uv^T is {u * v.transpose()}' )
# Sherman Morrison
print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowercase , _lowercase )}' )
def lowercase ():
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 718
|
'''simple docstring'''
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = (boundary[1] - boundary[0]) / steps
_lowerCAmelCase : Any = boundary[0]
_lowerCAmelCase : List[str] = boundary[1]
_lowerCAmelCase : Tuple = make_points(_A , _A , _A )
_lowerCAmelCase : Tuple = 0.0
y += (h / 2.0) * f(_A )
for i in x_i:
# print(i)
y += h * f(_A )
y += (h / 2.0) * f(_A )
return y
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = a + h
while x < (b - h):
yield x
_lowerCAmelCase : Any = x + h
def lowercase (_A ): # enter your function here
"""simple docstring"""
_lowerCAmelCase : int = (x - 0) * (x - 0)
return y
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = 0.0 # Lower bound of integration
_lowerCAmelCase : Dict = 1.0 # Upper bound of integration
_lowerCAmelCase : Optional[Any] = 10.0 # define number of steps or resolution
_lowerCAmelCase : Optional[int] = [a, b] # define boundary of integration
_lowerCAmelCase : List[Any] = method_a(_A , _A )
print(f'y = {y}' )
if __name__ == "__main__":
main()
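# Worked check: for f(x) = x^2 on [0, 1] with 10 steps the trapezoidal sum is
#     y = 0.1 * (f(0)/2 + f(0.1) + ... + f(0.9) + f(1)/2) = 0.1 * 3.35 = 0.335,
# close to the exact integral 1/3; the error shrinks as O(h^2) when h is halved.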
| 630
| 0
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCAmelCase__ = 2
class lowerCAmelCase__ :
def __init__( self : int , *, # begin keyword-only arguments
_lowerCamelCase : Optional[int]="<s>" , _lowerCamelCase : List[str]="<pad>" , _lowerCamelCase : Dict="</s>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=None , ):
_snake_case , _snake_case , _snake_case , _snake_case = bos, unk, pad, eos
_snake_case = []
_snake_case = []
_snake_case = {}
_snake_case = self.add_symbol(_lowerCamelCase )
_snake_case = self.add_symbol(_lowerCamelCase )
_snake_case = self.add_symbol(_lowerCamelCase )
_snake_case = self.add_symbol(_lowerCamelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCamelCase )
_snake_case = len(self.symbols )
def __eq__( self : Optional[int] , _lowerCamelCase : Any ):
return self.indices == other.indices
def __getitem__( self : Optional[Any] , _lowerCamelCase : Optional[int] ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[str] ):
return len(self.symbols )
def __contains__( self : Optional[int] , _lowerCamelCase : List[str] ):
return sym in self.indices
@classmethod
def lowercase ( cls : Dict , _lowerCamelCase : Dict ):
_snake_case = cls()
d.add_from_file(_lowerCamelCase )
return d
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=1 , _lowerCamelCase : Tuple=False ):
if word in self.indices and not overwrite:
_snake_case = self.indices[word]
_snake_case = self.count[idx] + n
return idx
else:
_snake_case = len(self.symbols )
_snake_case = idx
self.symbols.append(_lowerCamelCase )
self.count.append(_lowerCamelCase )
return idx
def lowercase ( self : Dict , _lowerCamelCase : Optional[int] ):
return 0
def lowercase ( self : Tuple , _lowerCamelCase : str ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
try:
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(_lowerCamelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(_lowerCamelCase ) )
return
_snake_case = f.readlines()
_snake_case = self._load_meta(_lowerCamelCase )
for line in lines[indices_start_line:]:
try:
_snake_case , _snake_case = line.rstrip().rsplit(''' ''' , 1 )
if field == "#fairseq:overwrite":
_snake_case = True
_snake_case , _snake_case = line.rsplit(''' ''' , 1 )
else:
_snake_case = False
_snake_case = int(_lowerCamelCase )
_snake_case = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(_lowerCamelCase ) )
self.add_symbol(_lowerCamelCase , n=_lowerCamelCase , overwrite=_lowerCamelCase )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> int:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_snake_case = dict((re.sub(R'''@@$''' , '''''' , __lowerCamelCase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , __lowerCamelCase ), v) for k, v in d.items() )
_snake_case = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
_snake_case = d[k] # restore
return da
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> List[Any]:
# prep
if not os.path.exists(__lowerCamelCase ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_snake_case = os.path.join(__lowerCamelCase , '''checkpoint.pt''' )
if not os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
_snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )
_snake_case = chkpt['''cfg''']['''model''']
# dicts
_snake_case = os.path.join(__lowerCamelCase , '''dict.txt''' )
if not os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
_snake_case = Dictionary.load(__lowerCamelCase )
_snake_case = rewrite_dict_keys(src_dict.indices )
_snake_case = len(__lowerCamelCase )
_snake_case = os.path.join(__lowerCamelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCamelCase , ensure_ascii=__lowerCamelCase , indent=__lowerCamelCase ) )
# merges_file (bpecodes)
_snake_case = os.path.join(__lowerCamelCase , '''bpecodes''' )
if not os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
_snake_case = os.path.join(__lowerCamelCase , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
# model config
_snake_case = os.path.join(__lowerCamelCase , '''config.json''' )
_snake_case = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1E-1_2,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# the values above are good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCamelCase , ensure_ascii=__lowerCamelCase , indent=__lowerCamelCase ) )
# tokenizer config
_snake_case = os.path.join(__lowerCamelCase , __lowerCamelCase )
_snake_case = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 10_24,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(__lowerCamelCase , ensure_ascii=__lowerCamelCase , indent=__lowerCamelCase ) )
# model
_snake_case = chkpt['''model''']
# remove unneeded keys
_snake_case = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCamelCase , __lowerCamelCase )
_snake_case = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
_snake_case = model_state_dict.pop(__lowerCamelCase )
else:
_snake_case = model_state_dict.pop(__lowerCamelCase )
_snake_case = BioGptConfig.from_pretrained(__lowerCamelCase )
_snake_case = BioGptForCausalLM(__lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCamelCase )
# save
_snake_case = os.path.join(__lowerCamelCase , __lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(__lowerCamelCase , __lowerCamelCase )
print('''Conversion is done!''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
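# Illustrative invocation (paths are placeholders and the script name is assumed;
# the fairseq dump dir must contain checkpoint.pt, dict.txt and bpecodes, as
# checked above):
#     python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#         --biogpt_checkpoint_path /path/to/biogpt_dump \
#         --pytorch_dump_folder_path /path/to/output_dir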
| 224
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random (channels-first) numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
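# Hedged usage sketch of the processor exercised by this test ("kakaobrain/align-base"
# is the public ALIGN checkpoint; `image` is assumed to be a PIL.Image):
#
#     from transformers import AlignProcessor
#     processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # inputs -> input_ids, token_type_ids, attention_mask, pixel_values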
| 224
| 1
|
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
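    # Hedged illustration of parse_unknown_args above: argparse hands back the
    # unrecognised argv as a flat list, and zipping that list against itself
    # offset by one pairs each flag with its value (flag names made up):
    #
    #     parse_unknown_args(["--num_proc", "8", "--split", "train"])
    #     # -> {"num_proc": "8", "split": "train"}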
| 717
|
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
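# Hedged mini-demo of the two memory tricks used above (channels-last layout
# and CPU bfloat16 autocast), on a plain Conv2d so it runs without diffusers
# or ipex installed:
demo_conv = torch.nn.Conv2d(3, 8, 3).to(memory_format=torch.channels_last)
demo_input = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    demo_output = demo_conv(demo_input)
print(demo_output.dtype)  # torch.bfloat16 under autocast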
| 94
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BigBird tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
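# Hedged sketch of the layout that build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences implement, with made-up ids
# (65 = [CLS], 66 = [SEP]) instead of a trained tokenizer:
#
#     cls, sep = [65], [66]
#     token_ids_0, token_ids_1 = [1, 2, 3], [4, 5]
#     pair = cls + token_ids_0 + sep + token_ids_1 + sep
#     # pair == [65, 1, 2, 3, 66, 4, 5, 66]
#     type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
#     # type_ids == [0, 0, 0, 0, 0, 1, 1, 1]  (same length as pair)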
| 392
|
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Spanish national id (DNI) validator: the trailing letter is a checksum of the 8-digit number modulo 23."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
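    # Quick sanity checks ("12345678Z" carries the correct check letter,
    # since 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z"):
    assert is_spain_national_id("12345678Z")
    assert not is_spain_national_id("12345678A")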
| 392
| 1
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase : Dict ={'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `lowerCamelCase` above holds the expected encoding dict (kept verbatim)
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
| 262
|
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
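    # Sanity checks: atbash maps a <-> z and B <-> Y, leaves non-letters
    # alone, and is therefore its own inverse.
    assert atbash("abc") == "zyx"
    assert atbash(atbash("Attack at dawn!")) == "Attack at dawn!"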
| 262
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
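# A minimal sketch of the lazy-import pattern used above, assuming nothing
# beyond the standard library (transformers' real _LazyModule also handles
# __dir__, pickling, and direct submodule access):
#
#     import importlib
#     import types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, attr):
#             # import the owning submodule only on first attribute access
#             module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#             return getattr(module, attr)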
| 17
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EncodecFeatureExtractor (audio) and a T5 tokenizer (text) into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
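if __name__ == "__main__":
    # Hedged demo of the _decode_audio slicing trick above, independent of any
    # model: pad the mask with the *non*-padding value so generated samples
    # beyond the prompt are kept, then boolean-slice each batch item back to
    # its true length (all values here are made up).
    padding_value = 0.0
    audio = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
    mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])  # 1 = real sample, 0 = padding

    difference = audio.shape[-1] - mask.shape[-1]
    mask = np.pad(mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)

    sliced = [audio[i][mask[i][None, :] != padding_value].reshape(1, -1) for i in range(2)]
    print([s.shape for s in sliced])  # [(1, 6), (1, 4)]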
| 17
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
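if __name__ == "__main__":
    # The pad_to_multiple_of rounding rule shared by _pad and _truncate above,
    # shown in isolation: max_length is bumped up to the next multiple.
    max_length, pad_to_multiple_of = 100, 16
    if max_length % pad_to_multiple_of != 0:
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    assert max_length == 112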
| 405
|
import itertools
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(f"{solution() = }")
| 405
| 1
|
cache: dict = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
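    # Project Euler 191 states there are 19 prize strings over a 3-day period
    # (out of the 3^3 = 27 possible strings):
    assert solution(3) == 19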
| 415
|
import inspect
from typing import Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """Unconditional latent diffusion: a UNet denoises latents under a DDIM scheduler, and a VQ-VAE decodes them."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
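# Hedged usage sketch (checkpoint name is illustrative; any LDM checkpoint
# exposing a vqvae/unet/scheduler triple should work):
#
#     pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     images = pipeline(batch_size=1, num_inference_steps=50).images
#     images[0].save("ldm_sample.png")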
| 294
| 0
|
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 613
|
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 613
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 97
|
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 693
| 0
|
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : str = TaConfig.from_json_file(_lowerCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
lowercase__ : str = TaForConditionalGeneration(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 701
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='VideoMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )

                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
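
# The .npy asset stores the clip as a stacked frames-first array; handing the image
# processor a list of per-frame arrays (rather than the raw ndarray) matches the video
# input format the VideoMAE processor expects.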
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs['bool_masked_pos'] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short', norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 645
| 0
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['a', 'b', 'c'])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output), [
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
            ], )
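
        # With this tiny random checkpoint every candidate label scores roughly 1/3, so
        # the batched assertions below can only pin down the label type, not the order,
        # which is why they compare against ANY(str).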
        output = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                ],
            ]
            * 5, )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', framework='tf')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['a', 'b', 'c'])
        self.assertEqual(
            nested_simplify(output), [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}], )
        output = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                ],
            ]
            * 5, )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(output), [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ], )
        output = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5, )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', framework='tf')
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image, candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(output), [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ], )
        output = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5, )
| 181
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """
    Adjacency-list representation of a graph, backed by a Python dictionary.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
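

# Minimal usage sketch for the class above (printed output shown as a comment):
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(0, 1).add_edge(1, 2)
    print(graph)  # {0: [1], 1: [0, 2], 2: [1]}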
| 181
| 1
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4_096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 264
|
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
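

# The loop walks the Pell-style recurrence behind almost-equilateral Heronian triangles
# (sides a, a, a +/- 1 with integral area) and sums their perimeters up to max_perimeter;
# the first few perimeters generated are 16, 50 and 196 (Project Euler problem 94).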
if __name__ == "__main__":
print(f"""{solution() = }""")
| 264
| 1
|
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
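

# Sanity check against the fixtures above: in demo graph 0 the only edges whose removal
# disconnects the graph are (2, 3), (3, 4) and (2, 5), so compute_bridges(get_demo_graph(0))
# returns exactly those pairs (discovery order depends on the DFS).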
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
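
# _import_structure maps each submodule to its public names; _LazyModule defers the
# torch-heavy imports until one of those attributes is first accessed, which keeps
# `import transformers` cheap.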
| 25
| 1
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
a__ : Optional[Any] = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class DiffieHellman:
    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
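
# Typical exchange (illustrative): both parties pick the same group, swap public keys
# and derive identical SHA-256 digests as the shared secret:
#     alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#     assert alice.generate_shared_key(bob.generate_public_key()) == \
#         bob.generate_shared_key(alice.generate_public_key())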
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
a__ : Any = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 223
| 1
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
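

# The key remapping above relies on zip() pairing the two state dicts in identical
# insertion order, which only holds when both architectures register parameters the
# same way; the two length prints act as the sanity check for that assumption.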
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 369
|
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
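

# Example: reverse_long_words('Hey wollef sroirraw') == 'Hey fellow warriors'
# (only words longer than four characters are reversed).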
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 69
| 0
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
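

# Strand sort repeatedly peels an already-ordered "strand" off the input and merges it
# into the running solution: O(n) comparisons on sorted input, O(n^2) in the worst case.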
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 705
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 32
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 592
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_matrix_change_component(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_matrix_component(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_matrix_add(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test_matrix_sub(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5)), )
if __name__ == "__main__":
unittest.main()
| 592
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations.")
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
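

# These helpers are generic glue for the surrounding scripts: freeze_params() locks a
# sub-network (for example a text encoder) during fine-tuning, and get_device() prefers
# CUDA, then MPS, then CPU.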
| 41
|
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(num, ans + num, ans)
    return ans
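

# The scan keeps the best of restarting at num, extending the running total, or keeping
# the previous answer; for a non-contiguous subsequence this converges on the sum of the
# positive elements, e.g. max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12.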
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 1
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str, config_file : str, pytorch_dump_path : str ):
# Initialise PyTorch model
config = LxmertConfig.from_json_file(config_file )
print(F'''Building PyTorch model from configuration: {config}''' )
model = LxmertForPreTraining(config )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(model, config, tf_checkpoint_path )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__snake_case : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
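# Example invocation (illustrative file names; only the flags come from the
# parser above):
#   python convert_lxmert_tf_checkpoint.py \
#       --tf_checkpoint_path ./ckpt/model.ckpt \
#       --config_file ./ckpt/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin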
| 131
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self ) -> List[Any]:
batch_size = 1
num_channels = 3
sizes = (3_2, 3_2)
image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def __lowerCAmelCase ( self ) -> int:
torch.manual_seed(0 )
model = UNet2DConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
model = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(config )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type="v_prediction" )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uint8(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , )
_a = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.Generator(device=snake_case_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_a = output.images
_a = torch.Generator(device=snake_case_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_a = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> int:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type="v_prediction" )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uint8(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , )
_a = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_a = output.images
assert image.shape[0] == 2
_a = torch.Generator(device=snake_case_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_a = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __lowerCAmelCase ( self ) -> Any:
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type="v_prediction" )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uint8(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_a = unet.half()
_a = text_encoder.half()
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , )
_a = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ).images
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_a = "stabilityai/stable-diffusion-x4-upscaler"
_a = StableDiffusionUpscalePipeline.from_pretrained(snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_a = "a cat sitting on a park bench"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type="np" , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_a = "stabilityai/stable-diffusion-x4-upscaler"
_a = StableDiffusionUpscalePipeline.from_pretrained(
snake_case_ , torch_dtype=torch.float16 , )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_a = "a cat sitting on a park bench"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type="np" , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_a = "stabilityai/stable-diffusion-x4-upscaler"
_a = StableDiffusionUpscalePipeline.from_pretrained(
snake_case_ , torch_dtype=torch.float16 , )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a = "a cat sitting on a park bench"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=5 , output_type="np" , )
_a = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
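# End-to-end usage sketch for the pipeline under test (illustrative; the model
# id and prompt come from the tests above, the rest is a minimal assumption):
#   pipe = StableDiffusionUpscalePipeline.from_pretrained(
#       "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#   ).to("cuda")
#   upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_img).images[0]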
| 131
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
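# Behavior sketch for the lazy pattern above (not part of the module): nothing
# listed in _import_structure is imported until first attribute access, e.g.
#   from transformers.models.speech_to_text import Speech2TextConfig
# only resolves configuration_speech_to_text at that point.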
| 707
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
UpperCamelCase = True
except ImportError:
UpperCamelCase = False
try:
from torch.hub import _get_torch_home
UpperCamelCase = _get_torch_home()
except ImportError:
UpperCamelCase = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
UpperCamelCase = os.path.join(torch_cache_home, """transformers""")
UpperCamelCase = """https://cdn.huggingface.co"""
UpperCamelCase = """https://s3.amazonaws.com/models.huggingface.co/bert"""
UpperCamelCase = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
UpperCamelCase = os.path.join(PATH, """config.yaml""")
UpperCamelCase = os.path.join(PATH, """attributes.txt""")
UpperCamelCase = os.path.join(PATH, """objects.txt""")
UpperCamelCase = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
UpperCamelCase = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
UpperCamelCase = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
UpperCamelCase = """pytorch_model.bin"""
UpperCamelCase = """config.yaml"""
def lowerCAmelCase ( UpperCamelCase_: List[str]=OBJECTS , UpperCamelCase_: str=ATTRIBUTES ) -> str:
'''simple docstring'''
_a = []
with open(UpperCamelCase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
_a = []
with open(UpperCamelCase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCAmelCase ( UpperCamelCase_: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a = OrderedDict()
with open(UpperCamelCase_ , "rb" ) as f:
_a = pkl.load(UpperCamelCase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
_a = ckp.pop(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
_a = torch.tensor(UpperCamelCase_ )
else:
assert isinstance(UpperCamelCase_ , torch.Tensor ), type(UpperCamelCase_ )
_a = v
return r
class lowercase_ :
A__ : Tuple = {}
def __init__( self , a_ , a_ = "root" , a_=0 ) ->Optional[Any]:
'''simple docstring'''
_a = name
_a = level
_a = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_a = copy.deepcopy(a_ )
_a = copy.deepcopy(a_ )
if isinstance(a_ , a_ ):
_a = Config(a_ , name=a_ , level=level + 1 )
_a = v
setattr(self , a_ , a_ )
_a = d
def __repr__( self ) ->Optional[int]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , a_ , a_ ) ->str:
'''simple docstring'''
_a = val
_a = val
_a = key.split("." )
_a = len(a_ ) - 1
_a = self._pointer
if len(a_ ) > 1:
for i, l in enumerate(a_ ):
if hasattr(self , a_ ) and isinstance(getattr(self , a_ ) , a_ ):
setattr(getattr(self , a_ ) , ".".join(levels[i:] ) , a_ )
if l == last_level:
_a = val
else:
_a = pointer[l]
def lowerCamelCase__ ( self ) ->int:
'''simple docstring'''
return self._pointer
def lowerCamelCase__ ( self , a_ , a_ ) ->Any:
'''simple docstring'''
with open(f'''{file_name}''' , "w" ) as stream:
dump(a_ , a_ )
def lowerCamelCase__ ( self , a_ , a_ ) ->int:
'''simple docstring'''
with open(f'''{file_name}''' , "w" ) as stream:
json.dump(a_ , a_ )
@staticmethod
def lowerCamelCase__ ( a_ ) ->Union[str, Any]:
'''simple docstring'''
with open(a_ ) as stream:
_a = load(a_ , Loader=a_ )
return data
def __str__( self ) ->List[Any]:
'''simple docstring'''
_a = " "
if self._name != "root":
_a = f'''{t * (self._level-1)}{self._name}:\n'''
else:
_a = ""
_a = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(a_ , a_ ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(a_ ).__name__})\n'''
_a = level
return r[:-1]
@classmethod
def lowerCamelCase__ ( cls , a_ , **a_ ) ->Tuple:
'''simple docstring'''
_a , _a = cls.get_config_dict(a_ , **a_ )
return cls(a_ )
@classmethod
def lowerCamelCase__ ( cls , a_ , **a_ ) ->List[Any]:
'''simple docstring'''
_a = kwargs.pop("cache_dir" , a_ )
_a = kwargs.pop("force_download" , a_ )
_a = kwargs.pop("resume_download" , a_ )
_a = kwargs.pop("proxies" , a_ )
_a = kwargs.pop("local_files_only" , a_ )
if os.path.isdir(a_ ):
_a = os.path.join(a_ , a_ )
elif os.path.isfile(a_ ) or is_remote_url(a_ ):
_a = pretrained_model_name_or_path
else:
_a = hf_bucket_url(a_ , filename=a_ , use_cdn=a_ )
try:
# Load from URL or cache if already cached
_a = cached_path(
a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_a = Config.load_yaml(a_ )
except EnvironmentError:
_a = "Can't load config for"
raise EnvironmentError(a_ )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(a_ ), kwargs
def lowerCAmelCase ( UpperCamelCase_: Optional[int] ) -> Tuple:
'''simple docstring'''
_a = torch.load("dump.pt" , map_location=in_tensor.device )
_a = in_tensor.numpy()
_a = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(UpperCamelCase_ , UpperCamelCase_ , rtol=0.01 , atol=0.1 ), (
f'''{sum([1 for x in np.isclose(UpperCamelCase_ , UpperCamelCase_ , rtol=0.01 , atol=0.1 ).flatten() if not x] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowerCAmelCase ( UpperCamelCase_: str ) -> List[str]:
'''simple docstring'''
_a = urlparse(UpperCamelCase_ )
return parsed.scheme in ("http", "https")
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Dict=True ) -> str:
'''simple docstring'''
_a = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_a = "/" not in model_id
if legacy_format:
return f'''{endpoint}/{model_id}-{filename}'''
else:
return f'''{endpoint}/{model_id}/{filename}'''
def lowerCAmelCase ( UpperCamelCase_: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Tuple=0 , UpperCamelCase_: Union[str, Any]=None , ) -> List[str]:
'''simple docstring'''
_a = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
ua += "; " + "; ".join("{}/{}".format(UpperCamelCase_ , UpperCamelCase_ ) for k, v in user_agent.items() )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
ua += "; " + user_agent
_a = {"user-agent": ua}
if resume_size > 0:
_a = "bytes=%d-" % (resume_size,)
_a = requests.get(UpperCamelCase_ , stream=UpperCamelCase_ , proxies=UpperCamelCase_ , headers=UpperCamelCase_ )
if response.status_code == 416: # Range not satisfiable
return
_a = response.headers.get("Content-Length" )
_a = resume_size + int(UpperCamelCase_ ) if content_length is not None else None
_a = tqdm(
unit="B" , unit_scale=UpperCamelCase_ , total=UpperCamelCase_ , initial=UpperCamelCase_ , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(UpperCamelCase_ ) )
temp_file.write(UpperCamelCase_ )
progress.close()
def lowerCAmelCase ( UpperCamelCase_: Optional[int] , UpperCamelCase_: str=None , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: int=10 , UpperCamelCase_: List[str]=False , UpperCamelCase_: int=None , UpperCamelCase_: List[str]=False , ) -> int:
'''simple docstring'''
if cache_dir is None:
_a = TRANSFORMERS_CACHE
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a = str(UpperCamelCase_ )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
_a = None
if not local_files_only:
try:
_a = requests.head(UpperCamelCase_ , allow_redirects=UpperCamelCase_ , proxies=UpperCamelCase_ , timeout=UpperCamelCase_ )
if response.status_code == 200:
_a = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_a = url_to_filename(UpperCamelCase_ , UpperCamelCase_ )
# get cache path to put the file
_a = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
# etag is None means we don't have a connection, the url doesn't exist, or it is otherwise inaccessible;
# try to get the last downloaded one
if etag is None:
if os.path.exists(UpperCamelCase_ ):
return cache_path
else:
_a = [
file
for file in fnmatch.filter(os.listdir(UpperCamelCase_ ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(UpperCamelCase_ ) > 0:
return os.path.join(UpperCamelCase_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(UpperCamelCase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_a = cache_path + ".lock"
with FileLock(UpperCamelCase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(UpperCamelCase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_a = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(UpperCamelCase_ , "a+b" ) as f:
yield f
_a = _resumable_file_manager
if os.path.exists(UpperCamelCase_ ):
_a = os.stat(UpperCamelCase_ ).st_size
else:
_a = 0
else:
_a = partial(tempfile.NamedTemporaryFile , dir=UpperCamelCase_ , delete=UpperCamelCase_ )
_a = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" % (UpperCamelCase_ , temp_file.name) )
http_get(
UpperCamelCase_ , UpperCamelCase_ , proxies=UpperCamelCase_ , resume_size=UpperCamelCase_ , user_agent=UpperCamelCase_ , )
os.replace(temp_file.name , UpperCamelCase_ )
_a = {"url": url, "etag": etag}
_a = cache_path + ".json"
with open(UpperCamelCase_ , "w" ) as meta_file:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return cache_path
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: Dict=None ) -> Any:
'''simple docstring'''
_a = url.encode("utf-8" )
_a = sha256(UpperCamelCase_ )
_a = url_hash.hexdigest()
if etag:
_a = etag.encode("utf-8" )
_a = sha256(UpperCamelCase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
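# Cache-key sketch for the function above: the on-disk name is the sha256 hex
# digest of the url, with "." plus the sha256 hex digest of the etag appended
# when one is known, and a trailing ".h5" preserved for TF weight files.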
def lowerCAmelCase ( UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Tuple=False , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: str=False , UpperCamelCase_: Dict=None , UpperCamelCase_: int=False , UpperCamelCase_: Dict=False , UpperCamelCase_: Any=False , ) -> List[str]:
'''simple docstring'''
if cache_dir is None:
_a = TRANSFORMERS_CACHE
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a = str(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a = str(UpperCamelCase_ )
if is_remote_url(UpperCamelCase_ ):
# URL, so get it from the cache (downloading if necessary)
_a = get_from_cache(
UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , user_agent=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
elif os.path.exists(UpperCamelCase_ ):
# File, and it exists.
_a = url_or_filename
elif urlparse(UpperCamelCase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(UpperCamelCase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(UpperCamelCase_ ) )
if extract_compressed_file:
if not is_zipfile(UpperCamelCase_ ) and not tarfile.is_tarfile(UpperCamelCase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_a , _a = os.path.split(UpperCamelCase_ )
_a = output_file.replace("." , "-" ) + "-extracted"
_a = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isdir(UpperCamelCase_ ) and os.listdir(UpperCamelCase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_a = output_path + ".lock"
with FileLock(UpperCamelCase_ ):
shutil.rmtree(UpperCamelCase_ , ignore_errors=UpperCamelCase_ )
os.makedirs(UpperCamelCase_ )
if is_zipfile(UpperCamelCase_ ):
with ZipFile(UpperCamelCase_ , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase_ )
zip_file.close()
elif tarfile.is_tarfile(UpperCamelCase_ ):
_a = tarfile.open(UpperCamelCase_ )
tar_file.extractall(UpperCamelCase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(UpperCamelCase_ ) )
return output_path_extracted
return output_path
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: str="," ) -> Tuple:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
with open(UpperCamelCase_ ) as f:
_a = eval(f.read() )
else:
_a = requests.get(UpperCamelCase_ )
try:
_a = req.json()
except Exception:
_a = req.content.decode()
assert data is not None, "could not connect"
try:
_a = eval(UpperCamelCase_ )
except Exception:
_a = data.split("\n" )
req.close()
return data
def lowerCAmelCase ( UpperCamelCase_: Dict ) -> Union[str, Any]:
'''simple docstring'''
_a = requests.get(UpperCamelCase_ )
_a = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCAmelCase ( UpperCamelCase_: int ) -> Tuple:
'''simple docstring'''
_a = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as stream:
_a = pkl.load(UpperCamelCase_ )
_a = weights.pop("model" )
_a = {}
for k, v in model.items():
_a = torch.from_numpy(UpperCamelCase_ )
if "running_var" in k:
_a = torch.tensor([0] )
_a = k.replace("running_var" , "num_batches_tracked" )
_a = zero
return new
def lowerCAmelCase ( ) -> str:
'''simple docstring'''
print(f'''{os.path.abspath(os.path.join(UpperCamelCase_ , os.pardir ) )}/demo.ipynb''' )
def lowerCAmelCase ( UpperCamelCase_: List[str] , UpperCamelCase_: List[str]="RGB" ) -> List[Any]:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
_a = cv2.imread(UpperCamelCase_ )
else:
_a = get_image_from_url(UpperCamelCase_ )
assert img is not None, f'''could not connect to: {im}'''
_a = cv2.cvtColor(UpperCamelCase_ , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
_a = img[:, :, ::-1]
return img
def lowerCAmelCase ( UpperCamelCase_: Any , UpperCamelCase_: str=1 ) -> Any:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ))
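# The generator above yields consecutive slices of length `batch`; e.g. with
# batch=2 over five images it produces [img0, img1], [img2, img3], [img4]
# (a sketch; the default batch=1 yields one image per chunk).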
| 612
| 0
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE :List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
UpperCamelCase_ :int = 1_0_0_0_0
UpperCamelCase_ :Optional[List[str]] = None
UpperCamelCase_ :Optional[datasets.Features] = None
class __magic_name__ ( datasets.ArrowBasedBuilder ):
UpperCamelCase_ :str = ParquetConfig
def UpperCAmelCase_ ( self )-> Dict:
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase_ ( self , _lowercase )-> Any:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCamelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowercase , (str, list, tuple) ):
UpperCamelCase_ = data_files
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCamelCase_ = [dl_manager.iter_files(_lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCamelCase_ = []
for split_name, files in data_files.items():
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCamelCase_ = [dl_manager.iter_files(_lowercase ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowercase ):
with open(_lowercase , "rb" ) as f:
UpperCamelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(_lowercase ) )
break
splits.append(datasets.SplitGenerator(name=_lowercase , gen_kwargs={"files": files} ) )
return splits
def UpperCAmelCase_ ( self , _lowercase )-> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase_ = table_cast(_lowercase , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase_ ( self , _lowercase )-> List[Any]:
UpperCamelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowercase ) ):
with open(_lowercase , "rb" ) as f:
UpperCamelCase_ = pq.ParquetFile(_lowercase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCamelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(_lowercase )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(_lowercase )}: {e}" )
raise
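# Usage sketch (illustrative file pattern; this builder is what backs it):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/*.parquet"})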
| 628
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :List[Any] = LayoutLMTokenizer
UpperCamelCase_ :Dict = LayoutLMTokenizerFast
UpperCamelCase_ :List[str] = True
UpperCamelCase_ :Dict = True
def UpperCAmelCase_ ( self )-> str:
super().setUp()
UpperCamelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self , **_lowercase )-> List[str]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = "UNwant\u00E9d,running"
UpperCamelCase_ = "unwanted, running"
return input_text, output_text
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file )
UpperCamelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(_lowercase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
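# Why the ids above are [7, 4, 5, 10, 8, 9]: they are the 0-indexed positions
# of "un", "##want", "##ed", ",", "runn", "##ing" in the vocab list written
# out in setUp, so the round trip is fully determined by that file.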
def UpperCAmelCase_ ( self )-> Dict:
pass
| 628
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __a : Dict , __a : Any=7 , __a : str=3 , __a : List[Any]=3_0 , __a : List[str]=4_0_0 , __a : List[Any]=True , __a : List[Any]=None , __a : int=True , __a : str=[0.5, 0.5, 0.5] , __a : Dict=[0.5, 0.5, 0.5] , __a : Optional[Any]=True , __a : Tuple=1 / 2_5_5 , __a : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : int = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Any = batch_size
snake_case__ : Optional[Any] = num_channels
snake_case__ : List[str] = min_resolution
snake_case__ : Dict = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : Any = size
snake_case__ : str = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Tuple = image_std
snake_case__ : Tuple = do_rescale
snake_case__ : str = rescale_factor
snake_case__ : Tuple = do_pad
def lowercase ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase ( self : List[str] , __a : Optional[Any] , __a : Optional[int]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__a , Image.Image ):
snake_case__ , snake_case__ : List[Any] = image.size
else:
snake_case__ , snake_case__ : str = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
snake_case__ : Dict = self.size["""shortest_edge"""]
elif w > h:
snake_case__ : int = self.size["""shortest_edge"""]
snake_case__ : int = int(self.size["""shortest_edge"""] * w / h )
else:
snake_case__ : Tuple = self.size["""shortest_edge"""]
snake_case__ : int = self.size["""shortest_edge"""]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__ , snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Union[str, Any] = max(__a , key=lambda __a : item[0] )[0]
snake_case__ : Union[str, Any] = max(__a , key=lambda __a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase__ (__snake_case , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def lowercase ( self : Optional[int] ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def lowercase ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self : Dict ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , """image_mean""" ) )
self.assertTrue(hasattr(__a , """image_std""" ) )
self.assertTrue(hasattr(__a , """do_normalize""" ) )
self.assertTrue(hasattr(__a , """do_resize""" ) )
self.assertTrue(hasattr(__a , """size""" ) )
def lowercase ( self : Optional[int] ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __a )
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__a )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
self.assertEqual(image_processor.do_pad , __a )
def lowercase ( self : Optional[int] ):
pass
def lowercase ( self : Optional[int] ):
# Initialize image_processing
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
snake_case__ , snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a )
snake_case__ : Optional[Any] = image_processing(__a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
snake_case__ , snake_case__ : str = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : str = image_processing(__a , return_tensors="""pt""" ).pixel_values
snake_case__ , snake_case__ : int = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase ( self : str ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
snake_case__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
snake_case__ , snake_case__ : Any = self.image_processor_tester.get_expected_values(__a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : List[str] = image_processing(__a , return_tensors="""pt""" ).pixel_values
snake_case__ , snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase ( self : Tuple ):
# prepare image and target
snake_case__ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Tuple = {"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
snake_case__ : Any = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
snake_case__ : Optional[int] = image_processing(images=__a , annotations=__a , return_tensors="""pt""" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __a )
snake_case__ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __a , atol=1e-4 ) )
# verify area
snake_case__ : List[str] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __a ) )
# verify boxes
snake_case__ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __a )
snake_case__ : Union[str, Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __a , atol=1e-3 ) )
# verify image_id
snake_case__ : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __a ) )
# verify is_crowd
snake_case__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __a ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __a ) )
# verify orig_size
snake_case__ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __a ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __a ) )
@slow
def lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Dict = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
snake_case__ : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
snake_case__ : Dict = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
snake_case__ : List[Any] = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors="""pt""" )
# verify pixel values
snake_case__ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __a )
snake_case__ : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __a , atol=1e-4 ) )
# verify area
snake_case__ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __a ) )
# verify boxes
snake_case__ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __a )
snake_case__ : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __a , atol=1e-3 ) )
# verify image_id
snake_case__ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __a ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __a ) )
# verify class_labels
snake_case__ : List[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __a ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __a )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __a ) )
# verify size
snake_case__ : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __a ) )
| 127
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_: Union[str, Any] = TypeVar('T')
class lowercase__ (Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] , __a : list[T] , __a : Callable[[T, T], T] ):
snake_case__ : Any | T = None
snake_case__ : int = len(__a )
snake_case__ : list[T] = [any_type for _ in range(self.N )] + arr
snake_case__ : Tuple = fnc
self.build()
def lowercase ( self : int ):
for p in range(self.N - 1 , 0 , -1 ):
snake_case__ : Tuple = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase ( self : Optional[Any] , __a : int , __a : T ):
p += self.N
snake_case__ : Optional[int] = v
while p > 1:
snake_case__ : int = p // 2
snake_case__ : Dict = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase ( self : int , __a : int , __a : int ): # noqa: E741
snake_case__ , snake_case__ : List[Any] = l + self.N, r + self.N
snake_case__ : T | None = None
while l <= r:
if l % 2 == 1:
snake_case__ : List[str] = self.st[l] if res is None else self.fn(__a , self.st[l] )
if r % 2 == 0:
snake_case__ : Any = self.st[r] if res is None else self.fn(__a , self.st[r] )
snake_case__ , snake_case__ : Tuple = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_: List[str] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
lowercase_: Optional[Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
lowercase_: Optional[Any] = SegmentTree(test_array, min)
lowercase_: Any = SegmentTree(test_array, max)
lowercase_: Optional[int] = SegmentTree(test_array, lambda a, b: a + b)
def _lowercase ( ):
"""simple docstring"""
for i in range(len(UpperCAmelCase_)):
for j in range(UpperCAmelCase_ , len(UpperCAmelCase_)):
snake_case__ : Tuple = reduce(UpperCAmelCase_ , test_array[i : j + 1])
snake_case__ : int = reduce(UpperCAmelCase_ , test_array[i : j + 1])
snake_case__ : Union[str, Any] = reduce(lambda UpperCAmelCase_ , UpperCAmelCase_: a + b , test_array[i : j + 1])
assert min_range == min_segment_tree.query(UpperCAmelCase_ , UpperCAmelCase_)
assert max_range == max_segment_tree.query(UpperCAmelCase_ , UpperCAmelCase_)
assert sum_range == sum_segment_tree.query(UpperCAmelCase_ , UpperCAmelCase_)
test_all_segments()
for index, value in test_updates.items():
lowercase_: Optional[int] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
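# Worked query example (a sketch): with test_array above, query(0, 2) covers
# [1, 10, -2], so min_segment_tree.query(0, 2) == -2 and
# sum_segment_tree.query(0, 2) == 9; test_all_segments() checks every (i, j)
# pair against functools.reduce in exactly this way.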
| 127
| 1
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__a : str = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=18 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = size if size is not None else {"height": 20, "width": 20}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = do_convert_rgb
UpperCamelCase = [512, 1024, 2048, 4096]
UpperCamelCase = patch_size if patch_size is not None else {"height": 16, "width": 16}
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase = Pix2StructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = Pix2StructImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_convert_rgb" ) )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase = 2048
UpperCamelCase = image_processor(UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase_ ):
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
UpperCamelCase = "Hello"
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase = PixaStructImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = Pix2StructImageProcessingTester(self , num_channels=4 )
UpperCamelCase = 3
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_convert_rgb" ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
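# Worked example of the `expected_hidden_dim` arithmetic used throughout these
# tests (illustrative values, not necessarily the tester defaults): each image
# is cut into patches of patch_size["height"] x patch_size["width"], every patch
# is flattened to height * width * channels pixel values, and 2 extra slots are
# appended for the patch's row and column index. For a 16x16 patch with 3
# channels after RGB conversion:
#     expected_hidden_dim = 16 * 16 * 3 + 2  # = 770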
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
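        # Worked example with the defaults above (image_size=30, patch_size=2,
        # mask_ratio=0.6): num_patches = (30 // 2) ** 2 = 225, so
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 visible tokens.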
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make the random mask reproducible across PT and TF
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make the random mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible across PT and TF
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the old-structure ProphetNet checkpoint weights into the current model structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old model stores query/key/value as one fused in_proj matrix; split it in three
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
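# Example invocation of this conversion script (the local paths are hypothetical
# and shown purely for illustration; the input must follow the old ProphetNet
# checkpoint layout):
#
#     python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#         --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#         --pytorch_dump_folder_path ./prophetnet_converted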
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """
    Returns the shortest paths from a vertex src to all other vertices,
    raising if the graph contains a negative-weight cycle.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
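# Minimal non-interactive sketch of the same API (the example graph below is an
# assumption for illustration, not part of the original script):
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 2},
#         {"src": 1, "dst": 2, "weight": -1},
#         {"src": 0, "dst": 2, "weight": 4},
#     ]
#     bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0)
#     # returns [0.0, 2, 1]: the direct 0 -> 2 edge (4) loses to 0 -> 1 -> 2 (2 - 1 = 1)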
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1_024,
'''facebook/bart-large''': 1_024,
'''facebook/bart-large-mnli''': 1_024,
'''facebook/bart-large-cnn''': 1_024,
'''facebook/bart-large-xsum''': 1_024,
'''yjernite/bart_eli5''': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BART tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token. Logs an error if used while not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. it includes the space before it,
        # so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
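# Hedged usage sketch (assumes network access to the "facebook/bart-base"
# checkpoint referenced in the URL maps above):
#
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     ids = tokenizer("Hello world")["input_ids"]
#     # A single sequence is wrapped as <s> ... </s>, matching
#     # build_inputs_with_special_tokens above:
#     assert ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.eos_token_id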
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
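# Hedged usage sketch of the post-processing exercised in the last test above
# (assumes a loaded `model`, `image_processor`, and a PIL `image` as in the test):
#
#     inputs = image_processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # Without target_sizes the segmentation map stays at the logits resolution
#     # (32x32 here); with target_sizes it is resized, e.g. to the input image size:
#     segmentation = image_processor.post_process_semantic_segmentation(
#         outputs=outputs, target_sizes=[image.size[::-1]]
#     )[0]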
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
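# Illustrative sketch of what `create_dummy_object` emits (the object name
# "UNet2DModel" is just an assumed example). For a class name and the backend
# string '["torch"]' it renders DUMMY_CLASS roughly as:
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])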
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    """Rename flattened PyTorch keys like `layers.0` to Flax-style `layers_0`."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict."""
    # Step 1: Convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
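# Illustrative usage sketch (added for clarity, not part of the original module).
# `flax_model` is any Flax model exposing `init_weights`, and `pt_model` is a
# hypothetical PyTorch counterpart with matching parameter names:
#
#   pt_state_dict = pt_model.state_dict()
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#   outputs = flax_model.apply({"params": flax_params}, inputs)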
| 332
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
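# Shape sketch (added for clarity; the sizes are illustrative defaults, not taken
# from a specific checkpoint). With clip_embeddings_dim=768, cross_attention_dim=768
# and clip_extra_context_tokens=4, a batch of 2 yields:
#
#   model = UnCLIPTextProjModel(time_embed_dim=1536, cross_attention_dim=768)
#   hidden, time_emb = model(
#       image_embeddings=torch.randn(2, 768),
#       prompt_embeds=torch.randn(2, 768),
#       text_encoder_hidden_states=torch.randn(2, 77, 768),
#       do_classifier_free_guidance=False,
#   )
#   # hidden: (2, 4 + 77, 768); time_emb: (2, 1536)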
| 332
| 1
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 714
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Perform basic consistency checks on the parsed arguments before training starts."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse CLI arguments, run sanity checks, and launch the distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
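# Illustrative invocation (added for reference; all paths below are placeholders
# and must point at your own binarized data, token counts, and student config):
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --force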
| 86
| 0
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
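# Illustrative usage sketch (added for clarity; "facebook/flava-full" is the
# public FLAVA checkpoint, and `image` stands in for any PIL image you load):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs contains both tokenizer outputs (input_ids, ...) and pixel_values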
| 101
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
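# Illustrative invocation (added for reference; the script name and model path
# are placeholders — point -m at a diffusers-format Stable Diffusion checkpoint,
# optionally containing an INC-quantized `best_model.pt`):
#
#   python generate_images.py -m ./sd-checkpoint -c "robotic cat with wings" -n 4 -s 42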
| 64
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
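# Minimal usage sketch (added for clarity): the defaults above reproduce the
# `distilbert-base-uncased` architecture; any override must keep `dim` divisible
# by `n_heads`.
#
#   config = DistilBertConfig()  # default base architecture
#   small = DistilBertConfig(n_layers=4, dim=312, hidden_dim=4 * 312, n_heads=12)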
| 450
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` using the Sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
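# Worked example (added for clarity):
#
#   >>> prime_sieve(10)
#   [2, 3, 5, 7]
#   >>> prime_sieve(2)
#   [2]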
| 450
| 1
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
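# Worked trace (added for clarity): euclidean_gcd(6, 3) iterates
# (a, b) = (6, 3) -> (3, 0) and returns 3; the recursive variant unwinds the
# same way: euclidean_gcd_recursive(6, 3) -> euclidean_gcd_recursive(3, 0) -> 3.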
| 148
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
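# Illustrative round trip (added for clarity; the 90M checkpoint is the one
# referenced in the vocabulary maps above, and simple lowercase text should
# round-trip because this tokenizer lowercases its input):
#
#   tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tok("sam is a great name")["input_ids"]
#   assert tok.decode(ids) == "sam is a great name"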
| 148
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
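# Illustrative usage sketch (added for clarity; `audio` stands in for a 1-D
# float array of 16 kHz samples, e.g. loaded via the `datasets` library):
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # PipelineTool instances are callable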
| 304
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings of ImageNet classes to corresponding class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
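# Illustrative usage sketch (added for clarity; "facebook/DiT-XL-2-256" is the
# public checkpoint this pipeline is commonly used with):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]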
| 304
| 1
|
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {"score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>"},
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {"score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>"},
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris"},
                {"sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 82
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
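# Note: the search above enumerates rational roots of x^n + y^n = z^n for
# n in {1, 2, -1, -2} and sums every distinct reduced fraction found; this
# appears to implement Project Euler problem 180 (order 35).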
if __name__ == "__main__":
print(F"{solution() = }")
| 82
| 1
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 333
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """
    Walk the state space tree with DFS; every state has exactly two children
    (exclude or include the current element), terminating at the end of the sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
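# Each element is either excluded or included at every index, so the recursion
# explores a binary tree with 2^n leaves and prints all 2^n subsequences.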
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 333
| 1
|
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end times and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The REST API pages at 100 jobs per request, so fetch any remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 49
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Check if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
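# After handling 2 separately, only odd candidates are tested; for the default
# nth=10001 this returns 104743 (Project Euler problem 7).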
if __name__ == "__main__":
print(f'''{solution() = }''')
| 32
| 0
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 129
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional projections in and out of the prefix hidden space.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        # Prepend the projected prefix embeddings to the token embeddings.
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        # Zero labels that pad the prefix positions so the loss only covers real tokens.
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            # Work in log-probabilities so beam scores can be summed across steps.
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed the beams with the top-k tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Later steps: keep the top beams by length-normalised cumulative score.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 129
| 1
|
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
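# For example, longest_common_substring("ababc", "abcdaba") returns "abc".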
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76
|
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
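# With the sample sets below, the intersection {"c", "d", "e"} has size 3 and
# the union has size 8, so jaccard_similarity(set_a, set_b) == 0.375.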
if __name__ == "__main__":
lowercase_ = {"a", "b", "c", "d", "e"}
lowercase_ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 695
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
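# Lazy import pattern used across transformers: submodules listed in
# _import_structure are only imported when first accessed at runtime.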
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the loaded dataset into features and target labels.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_pre_tokenization(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_pre_tokenization_bagoftoken(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
@slow
    def test_prefix_text(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
@slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 675
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
    def test_metric_cpu_multi(self):
debug_launcher(self.test_metrics.main )
@require_single_gpu
    def test_metric_gpu(self):
self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self):
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
| 675
| 1
|
'''simple docstring'''
import requests
UpperCAmelCase__ :Optional[Any] = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def __lowercase (_lowercase ) -> None:
"""simple docstring"""
# fetching a list of articles in json format
__lowerCamelCase : List[str] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""], 1 ):
print(f"{i}.) {article['title']}" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 483
|
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process; if it has not arrived yet,
        # fast-forward the clock to its arrival time.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 483
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
        return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class __magic_name__ ( A__, unittest.TestCase ):
lowercase : Optional[Any] =SpeechTaFeatureExtractor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(np.all(np.abs(np.mean(UpperCamelCase__ , axis=0 ) ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase__ , axis=0 ) - 1 ) < 1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test batched
UpperCAmelCase = feat_extract(UpperCamelCase__ , return_tensors="np" ).input_values
UpperCAmelCase = feat_extract(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = feat_extract(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[1][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_00 , 14_00 , 2_00 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = feat_extract(UpperCamelCase__ , max_length=UpperCamelCase__ , padding=UpperCamelCase__ )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10_00 , padding="max_length" , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10_00 , padding="longest" , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=20_00 , padding="longest" , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_00 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
UpperCAmelCase = np.asarray(UpperCamelCase__ )
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCamelCase__ ) == len(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCamelCase__ )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCamelCase__ )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**UpperCamelCase__ )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(UpperCamelCase__ ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , UpperCamelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**UpperCamelCase__ )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(UpperCamelCase__ ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(UpperCamelCase__ )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , UpperCamelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : str ) -> Any:
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort("id" ).select(range(UpperCamelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[Any]:
'''simple docstring'''
# fmt: off
UpperCAmelCase = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , UpperCamelCase__ , atol=1e-6 ) )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
'''simple docstring'''
# fmt: off
UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=UpperCamelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , UpperCamelCase__ , atol=1e-4 ) )
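# The `_check_zero_mean_unit_variance` assertions above validate the
# extractor's `do_normalize` behaviour. A minimal standalone sketch of that
# normalization and check using only numpy (names here are illustrative):
def zero_mean_unit_var_sketch(values):
    values = np.asarray(values, dtype=np.float32)
    normalized = (values - values.mean()) / np.sqrt(values.var() + 1e-7)
    assert abs(normalized.mean()) < 1e-3
    assert abs(normalized.var() - 1) < 1e-3
    return normalized

# zero_mean_unit_var_sketch(np.random.rand(800)) yields an array with ~zero
# mean and ~unit variance, which is what the unpadded region of `input_values`
# is expected to satisfy in the tests above.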
| 323
| 0
|
'''simple docstring'''
def snake_case__ ( _A: int = 10 , _A: int = 22 ) -> int:
'''simple docstring'''
lowerCAmelCase = range(1 , _A )
lowerCAmelCase = range(1 , _A )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
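# A named sketch of the same count: how many n-digit integers equal base**n?
# Only bases 1..9 can qualify (10**n already has n + 1 digits), and powers
# beyond 21 cannot (9**22 has only 21 digits), which is why the defaults
# above bound the search at 10 and 22.
def digit_power_count_sketch(max_base: int = 10, max_power: int = 22) -> int:
    return sum(
        1
        for power in range(1, max_power)
        for base in range(1, max_base)
        if len(str(base**power)) == power
    )

# digit_power_count_sketch() == 49 (the Project Euler 63 answer)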
| 605
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def snake_case__ ( _A: Optional[Any] , _A: List[Any]=0.999 , _A: str="cosine" , ) -> Union[str, Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_A: Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_A: List[str] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
lowerCAmelCase = []
for i in range(_A ):
lowerCAmelCase = i / num_diffusion_timesteps
lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) )
return torch.tensor(_A , dtype=torch.floataa )
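# A quick numeric illustration of the cosine schedule above (a sketch, not
# part of the scheduler): each beta is 1 - alpha_bar(t2)/alpha_bar(t1) over
# consecutive fractions of the schedule, clipped at max_beta, so the noise
# added per step grows toward the end.
def cosine_betas_sketch(num_steps: int = 4, max_beta: float = 0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

# cosine_betas_sketch(4) -> four increasing betas, the last clipped to 0.999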
class a__( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase_ : Union[str, Any] = 2
@register_to_config
def __init__( self , __lowerCAmelCase = 1000 , __lowerCAmelCase = 0.00085 , __lowerCAmelCase = 0.012 , __lowerCAmelCase = "linear" , __lowerCAmelCase = None , __lowerCAmelCase = "epsilon" , __lowerCAmelCase = "linspace" , __lowerCAmelCase = 0 , ):
"""simple docstring"""
if trained_betas is not None:
lowerCAmelCase = torch.tensor(__lowerCAmelCase , dtype=torch.floataa)
elif beta_schedule == "linear":
lowerCAmelCase = torch.linspace(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCAmelCase , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase = betas_for_alpha_bar(__lowerCAmelCase)
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
lowerCAmelCase = 1.0 - self.betas
lowerCAmelCase = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=None):
"""simple docstring"""
if schedule_timesteps is None:
lowerCAmelCase = self.timesteps
lowerCAmelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
lowerCAmelCase = 1 if len(__lowerCAmelCase) > 1 else 0
else:
lowerCAmelCase = timestep.cpu().item() if torch.is_tensor(__lowerCAmelCase) else timestep
lowerCAmelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def a_ ( self):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.index_for_timestep(__lowerCAmelCase)
if self.state_in_first_order:
lowerCAmelCase = self.sigmas[step_index]
else:
lowerCAmelCase = self.sigmas_interpol[step_index]
lowerCAmelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = num_inference_steps
lowerCAmelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase = np.linspace(0 , num_train_timesteps - 1 , __lowerCAmelCase , dtype=__lowerCAmelCase)[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase = (np.arange(0 , __lowerCAmelCase) * step_ratio).round()[::-1].copy().astype(__lowerCAmelCase)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase = (np.arange(__lowerCAmelCase , 0 , -step_ratio)).round().copy().astype(__lowerCAmelCase)
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
lowerCAmelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
lowerCAmelCase = torch.from_numpy(np.log(__lowerCAmelCase)).to(__lowerCAmelCase)
lowerCAmelCase = np.interp(__lowerCAmelCase , np.arange(0 , len(__lowerCAmelCase)) , __lowerCAmelCase)
lowerCAmelCase = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase).to(device=__lowerCAmelCase)
# interpolate sigmas
lowerCAmelCase = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
lowerCAmelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
lowerCAmelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(__lowerCAmelCase).startswith("""mps"""):
# mps does not support float64
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase).to(__lowerCAmelCase , dtype=torch.floataa)
else:
lowerCAmelCase = torch.from_numpy(__lowerCAmelCase).to(__lowerCAmelCase)
# interpolate timesteps
lowerCAmelCase = self.sigma_to_t(__lowerCAmelCase).to(__lowerCAmelCase , dtype=timesteps.dtype)
lowerCAmelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
lowerCAmelCase = torch.cat([timesteps[:1], interleaved_timesteps])
lowerCAmelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase = defaultdict(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = sigma.log()
# get distribution
lowerCAmelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowerCAmelCase = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
lowerCAmelCase = low_idx + 1
lowerCAmelCase = self.log_sigmas[low_idx]
lowerCAmelCase = self.log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase = (low - log_sigma) / (low - high)
lowerCAmelCase = w.clamp(0 , 1)
# transform interpolation to time range
lowerCAmelCase = (1 - w) * low_idx + w * high_idx
lowerCAmelCase = t.view(sigma.shape)
return t
@property
def a_ ( self):
"""simple docstring"""
return self.sample is None
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , ):
"""simple docstring"""
lowerCAmelCase = self.index_for_timestep(__lowerCAmelCase)
# advance index counter by 1
lowerCAmelCase = timestep.cpu().item() if torch.is_tensor(__lowerCAmelCase) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase = self.sigmas[step_index]
lowerCAmelCase = self.sigmas_interpol[step_index + 1]
lowerCAmelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowerCAmelCase = self.sigmas[step_index - 1]
lowerCAmelCase = self.sigmas_interpol[step_index]
lowerCAmelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase = 0
lowerCAmelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCAmelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCAmelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""")
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase = sigma_interpol - sigma_hat
# store for 2nd order step
lowerCAmelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowerCAmelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowerCAmelCase = sigma_next - sigma_hat
lowerCAmelCase = self.sample
lowerCAmelCase = None
lowerCAmelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCAmelCase):
# mps does not support float64
lowerCAmelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa)
lowerCAmelCase = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
lowerCAmelCase = self.timesteps.to(original_samples.device)
lowerCAmelCase = timesteps.to(original_samples.device)
lowerCAmelCase = [self.index_for_timestep(__lowerCAmelCase , __lowerCAmelCase) for t in timesteps]
lowerCAmelCase = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
lowerCAmelCase = sigma.unsqueeze(-1)
lowerCAmelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self):
"""simple docstring"""
return self.config.num_train_timesteps
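# A standalone sketch of the `sigma_to_t` idea used by the scheduler above:
# given a noise level sigma, locate the bracketing pair in a decreasing list
# of at least two log-sigmas and linearly interpolate a fractional timestep.
# Names and the toy values are illustrative; the real method uses torch ops.
def sigma_to_t_sketch(sigma: float, log_sigmas: "list[float]") -> float:
    log_sigma = math.log(sigma)
    low_idx = len(log_sigmas) - 2  # fall back to the last interval
    for i in range(len(log_sigmas) - 1):
        if log_sigmas[i + 1] <= log_sigma:
            low_idx = i
            break
    low, high = log_sigmas[low_idx], log_sigmas[low_idx + 1]
    w = min(max((low - log_sigma) / (low - high), 0.0), 1.0)
    return (1 - w) * low_idx + w * (low_idx + 1)

# sigma_to_t_sketch(math.e, [2.0, 1.0, 0.0]) == 1.0: log(sigma) == 1.0 falls
# exactly on the middle entry.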
| 605
| 1
|
def a_ (__A ) -> List[str]:
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
__a : Union[str, Any] = sum(_A ) / len(_A ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
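# The function above computes the mean absolute deviation, but its identifiers
# are name-mangled; a clearly named restatement with a worked example:
def mean_absolute_deviation_sketch(nums: "list[float]") -> float:
    if not nums:
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)
    return sum(abs(x - average) for x in nums) / len(nums)

# For [1, 2, 3, 4] the average is 2.5, so the deviations are 1.5, 0.5, 0.5,
# 1.5 and the mean absolute deviation is 4.0 / 4 == 1.0.
assert mean_absolute_deviation_sketch([1, 2, 3, 4]) == 1.0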
| 351
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__( lowerCamelCase__ ):
lowercase__ = ["""image_processor""", """tokenizer"""]
lowercase__ = """BridgeTowerImageProcessor"""
lowercase__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : int , __snake_case : str , __snake_case : List[str] ):
super().__init__(__snake_case , __snake_case )
def __call__( self : int , __snake_case : Optional[Any] , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : List[Any] , ):
a : Optional[int] = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_token_type_ids=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
# add pixel_values + pixel_mask
a : List[str] = self.image_processor(
__snake_case , return_tensors=__snake_case , do_normalize=__snake_case , do_center_crop=__snake_case , **__snake_case )
encoding.update(__snake_case )
return encoding
def lowercase_ ( self : int , *__snake_case : List[str] , **__snake_case : List[str] ):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowercase_ ( self : List[str] , *__snake_case : Tuple , **__snake_case : Union[str, Any] ):
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = self.tokenizer.model_input_names
a : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
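# The processor above fans a single call out to a tokenizer and an image
# processor, then merges the two encodings. A minimal sketch of that pattern
# (the two callables are stand-ins, not the real BridgeTower components):
def combined_processor_sketch(tokenize, process_images, text, images):
    encoding = dict(tokenize(text))            # e.g. input_ids, attention_mask
    encoding.update(process_images(images))    # e.g. pixel_values, pixel_mask
    return encoding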
| 526
| 0
|
from __future__ import annotations
from typing import Any
def __magic_name__ ( lowercase ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
lowercase_ : Dict = {"""+""", """-""", """*""", """/"""}
lowercase_ : list[Any] = []
for token in postfix_notation:
if token in operations:
lowercase_ , lowercase_ : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase ) )
return stack.pop()
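# A self-contained, clearly named sketch of the evaluator above: operands are
# pushed, operators pop two values, and integer division is adjusted so it
# truncates toward zero for mixed-sign operands (matching the branch above).
def evaluate_postfix_sketch(tokens: "list[str]") -> int:
    if not tokens:
        return 0
    stack: "list[int]" = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            elif a * b < 0 and a % b != 0:
                stack.append(a // b + 1)
            else:
                stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()

# evaluate_postfix_sketch("2 3 + 4 *".split()) == 20
# evaluate_postfix_sketch("7 -2 /".split()) == -3  (truncation toward zero, not -4)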
if __name__ == "__main__":
import doctest
doctest.testmod()
| 436
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : List[str] = """unispeech"""
def __init__( self, snake_case__=32, snake_case__=7_68, snake_case__=12, snake_case__=12, snake_case__=30_72, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=0.1, snake_case__=0.0, snake_case__=0.0, snake_case__=0.1, snake_case__=0.1, snake_case__=0.02, snake_case__=1E-5, snake_case__="group", snake_case__="gelu", snake_case__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12), snake_case__=(5, 2, 2, 2, 2, 2, 2), snake_case__=(10, 3, 3, 3, 3, 2, 2), snake_case__=False, snake_case__=1_28, snake_case__=16, snake_case__=False, snake_case__=True, snake_case__=0.05, snake_case__=10, snake_case__=2, snake_case__=0.0, snake_case__=10, snake_case__=0, snake_case__=3_20, snake_case__=2, snake_case__=0.1, snake_case__=1_00, snake_case__=2_56, snake_case__=2_56, snake_case__=0.1, snake_case__="mean", snake_case__=False, snake_case__=False, snake_case__=2_56, snake_case__=80, snake_case__=0, snake_case__=1, snake_case__=2, snake_case__=0.5, **snake_case__, ) -> List[str]:
"""simple docstring"""
super().__init__(**snake_case__, pad_token_id=snake_case__, bos_token_id=snake_case__, eos_token_id=snake_case__ )
lowercase_ : int = hidden_size
lowercase_ : List[Any] = feat_extract_norm
lowercase_ : Optional[Any] = feat_extract_activation
lowercase_ : Any = list(snake_case__ )
lowercase_ : Optional[Any] = list(snake_case__ )
lowercase_ : List[str] = list(snake_case__ )
lowercase_ : Optional[Any] = conv_bias
lowercase_ : Union[str, Any] = num_conv_pos_embeddings
lowercase_ : Optional[Any] = num_conv_pos_embedding_groups
lowercase_ : str = len(self.conv_dim )
lowercase_ : int = num_hidden_layers
lowercase_ : List[str] = intermediate_size
lowercase_ : int = hidden_act
lowercase_ : List[Any] = num_attention_heads
lowercase_ : str = hidden_dropout
lowercase_ : List[str] = attention_dropout
lowercase_ : Union[str, Any] = activation_dropout
lowercase_ : Optional[int] = feat_proj_dropout
lowercase_ : str = final_dropout
lowercase_ : Union[str, Any] = layerdrop
lowercase_ : Any = layer_norm_eps
lowercase_ : Tuple = initializer_range
lowercase_ : int = num_ctc_classes
lowercase_ : Tuple = vocab_size
lowercase_ : Any = do_stable_layer_norm
lowercase_ : int = use_weighted_layer_sum
lowercase_ : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase_ : List[str] = apply_spec_augment
lowercase_ : str = mask_time_prob
lowercase_ : int = mask_time_length
lowercase_ : str = mask_time_min_masks
lowercase_ : int = mask_feature_prob
lowercase_ : List[str] = mask_feature_length
lowercase_ : Optional[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase_ : Tuple = num_codevectors_per_group
lowercase_ : int = num_codevector_groups
lowercase_ : List[str] = contrastive_logits_temperature
lowercase_ : Optional[Any] = feat_quantizer_dropout
lowercase_ : Dict = num_negatives
lowercase_ : Dict = codevector_dim
lowercase_ : Optional[int] = proj_codevector_dim
lowercase_ : Any = diversity_loss_weight
# ctc loss
lowercase_ : int = ctc_loss_reduction
lowercase_ : Any = ctc_zero_infinity
# pretraining loss
lowercase_ : int = replace_prob
@property
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul, self.conv_stride, 1 )
| 436
| 1
|
def _lowercase ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ):
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
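# Worked example for the Newton-Laplace relation implemented above,
# c = sqrt(bulk_modulus / density), with approximate textbook values for
# water (K ~ 2.15e9 Pa, rho ~ 998 kg/m^3 -- assumed here for illustration):
def speed_of_sound_sketch(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5

# speed_of_sound_sketch(998, 2.15e9) -> ~1467.8 m/s, close to the measured
# speed of sound in water at room temperature.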
| 386
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case = logging.getLogger(__name__)
def _lowercase ( SCREAMING_SNAKE_CASE_ : torch.nn.Module , SCREAMING_SNAKE_CASE_ : BnbQuantizationConfig , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , ):
"""simple docstring"""
UpperCamelCase = bnb_quantization_config.load_in_abit
UpperCamelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
UpperCamelCase = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(device_map.keys() ) > 1:
UpperCamelCase = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase = get_keys_to_not_convert(SCREAMING_SNAKE_CASE_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase = []
UpperCamelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE_ )
# compatibility with peft
UpperCamelCase = load_in_abit
UpperCamelCase = load_in_abit
UpperCamelCase = get_parameter_device(SCREAMING_SNAKE_CASE_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
UpperCamelCase = replace_with_bnb_layers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
# convert param to the right dtype
UpperCamelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
param.to(SCREAMING_SNAKE_CASE_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization. '
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
UpperCamelCase = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , modules_to_not_convert=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_memory=SCREAMING_SNAKE_CASE_ , no_split_module_classes=SCREAMING_SNAKE_CASE_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase = True
UpperCamelCase = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE_ , offload_state_dict=SCREAMING_SNAKE_CASE_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE_ , device_map=SCREAMING_SNAKE_CASE_ , offload_dir=SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
UpperCamelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase = {}
UpperCamelCase = special_dtypes
UpperCamelCase = no_split_module_classes
UpperCamelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase = get_balanced_memory(
SCREAMING_SNAKE_CASE_ , low_zero=(device_map == """balanced_low_0""") , max_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase = max_memory
UpperCamelCase = infer_auto_device_map(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# check that no quantized module ends up on the CPU or the disk
UpperCamelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None ):
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase = []
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Dict=None , ):
"""simple docstring"""
UpperCamelCase = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase = """.""".join(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase = False
break
if proceed:
# Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
UpperCamelCase = module.weight.data
if module.bias is not None:
UpperCamelCase = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = True
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
with init_empty_weights():
UpperCamelCase = deepcopy(SCREAMING_SNAKE_CASE_ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
UpperCamelCase = find_tied_parameters(SCREAMING_SNAKE_CASE_ )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(SCREAMING_SNAKE_CASE_ , [] )
UpperCamelCase = len(SCREAMING_SNAKE_CASE_ ) > 0
# Check if it is a base model
UpperCamelCase = False
if hasattr(SCREAMING_SNAKE_CASE_ , """base_model_prefix""" ):
UpperCamelCase = not hasattr(SCREAMING_SNAKE_CASE_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = list(set(SCREAMING_SNAKE_CASE_ ) ) + list(SCREAMING_SNAKE_CASE_ )
# remove ".weight" from the keys
UpperCamelCase = [""".weight""", """.bias"""]
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(SCREAMING_SNAKE_CASE_ , """""" )
filtered_module_names.append(SCREAMING_SNAKE_CASE_ )
return filtered_module_names
def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , bnb.nn.Linearabit ):
return True
return False
def _lowercase ( SCREAMING_SNAKE_CASE_ : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _lowercase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 , dtype=SCREAMING_SNAKE_CASE_ , value=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = param_name
UpperCamelCase = model
if "." in tensor_name:
UpperCamelCase = tensor_name.split(""".""" )
for split in splits[:-1]:
UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
# offload weights
UpperCamelCase = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , )
else:
offload_weight(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
offload_weight(SCREAMING_SNAKE_CASE_ , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , """meta""" , dtype=SCREAMING_SNAKE_CASE_ , value=torch.empty(*param.size() ) )
| 386
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_: List[str] = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a__ ( _a ):
snake_case_ = "wavlm"
def __init__( self, _UpperCAmelCase=32, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.0, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-5, _UpperCAmelCase="group", _UpperCAmelCase="gelu", _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512), _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2), _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2), _UpperCAmelCase=False, _UpperCAmelCase=128, _UpperCAmelCase=16, _UpperCAmelCase=320, _UpperCAmelCase=800, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=0.05, _UpperCAmelCase=10, _UpperCAmelCase=2, _UpperCAmelCase=0.0, _UpperCAmelCase=10, _UpperCAmelCase=320, _UpperCAmelCase=2, _UpperCAmelCase=0.1, _UpperCAmelCase=100, _UpperCAmelCase=256, _UpperCAmelCase=256, _UpperCAmelCase=0.1, _UpperCAmelCase="mean", _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=256, _UpperCAmelCase=(512, 512, 512, 512, 1500), _UpperCAmelCase=(5, 3, 3, 1, 1), _UpperCAmelCase=(1, 2, 3, 1, 1), _UpperCAmelCase=512, _UpperCAmelCase=80, _UpperCAmelCase=0, _UpperCAmelCase=1, _UpperCAmelCase=2, _UpperCAmelCase=False, _UpperCAmelCase=3, _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase, pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = feat_extract_norm
lowercase__ = feat_extract_activation
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = conv_bias
lowercase__ = num_buckets
lowercase__ = max_bucket_distance
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = num_ctc_classes
lowercase__ = vocab_size
lowercase__ = do_stable_layer_norm
lowercase__ = use_weighted_layer_sum
lowercase__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowercase__ = num_codevectors_per_group
lowercase__ = num_codevector_groups
lowercase__ = contrastive_logits_temperature
lowercase__ = num_negatives
lowercase__ = codevector_dim
lowercase__ = proj_codevector_dim
lowercase__ = diversity_loss_weight
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# adapter
lowercase__ = add_adapter
lowercase__ = adapter_kernel_size
lowercase__ = adapter_stride
lowercase__ = num_adapter_layers
lowercase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = list(_UpperCAmelCase )
lowercase__ = xvector_output_dim
@property
def snake_case__ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1 )
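# Quick illustration of the property above: the product of the conv strides is
# the waveform-to-frame downsampling factor of the feature encoder. With the
# default strides (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 == 320, i.e. one
# output frame per 320 input samples (20 ms of audio at 16 kHz).
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320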
| 668
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668
| 1
|
def snake_case (UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if height >= 1:
move_tower(height - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase )
move_disk(UpperCamelCase , UpperCamelCase )
move_tower(height - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def snake_case (UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
print("""moving disk from""" , UpperCamelCase , """to""" , UpperCamelCase )
def snake_case ():
'''simple docstring'''
lowerCamelCase__ = int(input("""Height of hanoi: """ ).strip() )
move_tower(UpperCamelCase , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
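# The mangled arguments above hide the pole rotation of the classic algorithm;
# a clearly named sketch (assumed to be the intended scheme): move height - 1
# disks to the spare pole, move the largest disk, then move the stack back on
# top. A tower of height n always takes 2**n - 1 moves.
def move_tower_sketch(height: int, from_pole: str, to_pole: str, with_pole: str) -> int:
    if height < 1:
        return 0
    moves = move_tower_sketch(height - 1, from_pole, with_pole, to_pole)
    print("moving disk from", from_pole, "to", to_pole)
    return moves + 1 + move_tower_sketch(height - 1, with_pole, to_pole, from_pole)

# move_tower_sketch(3, "A", "C", "B") prints 7 moves (2**3 - 1).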
| 165
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # When used as a target context manager, forward everything to the current processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # Mirror __call__ for padding already-processed features and labels.
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
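# A minimal usage sketch (not part of the original file; the checkpoint name is illustrative):
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# inputs = processor(audio=raw_waveform, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="HELLO WORLD").input_ids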
| 165
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
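# Illustration (assuming a standard transformers checkout): thanks to _LazyModule, a line like
#     from transformers.models.nllb import NllbTokenizer
# only triggers the sentencepiece-backed import when the attribute is first accessed,
# so importing the package itself stays cheap even when optional backends are missing.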
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of `text` (in bits)."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
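# A quick usage sketch (not part of the original file): the three printed lines are the
# first-order entropy, the second-order entropy, and their difference, each rounded to an
# integer before formatting.
# calculate_prob("hello world, this is just a sample text")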
| 34
| 1
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse the user dict out of an embedded profile-page script tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Lazily scrape an Instagram user's public profile page."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self):
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    """Smoke-test the scraper against a known account (skipped on CI)."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 12_0000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 79
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
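# Quick illustration (assumes the `datasets` helpers exercised above):
# extract_path_from_uri("s3://my-bucket/data") returns "my-bucket/data" (protocol stripped),
# while a local path such as "./local/path" is returned unchanged.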
| 455
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradients for all parameters of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
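# A minimal usage sketch (the helper names above were reconstructed, so treat them as illustrative):
# device = get_device()          # "cuda", "mps", or "cpu"
# print(f"[{get_timestamp()}] running on {device}")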
| 440
|
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
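# Worked example (matches the doctest in the upstream version of this algorithm):
# longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) -> [10, 22, 33, 41, 60, 80]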
| 440
| 1
|
def solution(length: int = 50) -> int:
    """Count tilings of a 1 x `length` row using unit squares and tiles of length 2, 3, and 4."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
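# Sanity check (from the Project Euler 117 statement): a row measuring five units
# can be tiled in exactly fifteen different ways, and indeed solution(5) == 15.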
| 31
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 686
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
MOBILENET_V1_INPUTS_DOCSTRING = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config, add_pooling_layer: bool = True) -> None:
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels,
                    kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
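# A minimal usage sketch (not from the original file; the checkpoint matches the docstring
# constants above, and `image` is a placeholder PIL image):
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
# model = MobileNetVaModel.from_pretrained("google/mobilenet_v1_1.0_224")
# outputs = model(**processor(images=image, return_tensors="pt"))
# outputs.last_hidden_state.shape  # torch.Size([1, 1024, 7, 7]), per _EXPECTED_OUTPUT_SHAPE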
| 424
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 424
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _snake_case(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _snake_case(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _snake_case(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _snake_case(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _snake_case(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 400
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
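# Worked example of the activation parsing above (illustrative, not from the original file):
# cfg = T5Config(feed_forward_proj="gated-gelu")
# cfg.is_gated_act   # -> True
# cfg.dense_act_fn   # -> "gelu_new", rewritten by the backwards-compatibility branch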
| 400
| 1
|
class DisjointSet:
    """Union-find over sets with counts, tracking the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using union by rank; returns True if successful.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the parent of a given set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
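# Usage sketch (the class name above was reconstructed, so treat it as illustrative):
# ds = DisjointSet([1, 1, 1])
# ds.merge(0, 1)   # -> True, the two singletons are joined
# ds.merge(0, 2)   # -> True
# ds.max_set       # -> 3, all three elements now form one set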
| 335
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the `datasets-cli` entry point to build a ConvertCommand."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
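# Hedged CLI sketch (assumes the standard `datasets-cli` entry point wires this command up):
# datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py --datasets_directory ./hf_datasets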
| 335
| 1
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into separate query/key/value tensors
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
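# Hedged invocation sketch (the script filename is illustrative):
# python convert_swin_timm_to_pytorch.py \
#     --swin_name swin_tiny_patch4_window7_224 \
#     --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224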
| 16
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image-to-text pipeline using an `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens

        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """
        Generate a caption for the image(s) passed as inputs. Returns a list of dictionaries, each with a
        `generated_text` key.
        """
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # The GIT model sets `input_ids` to `None` in `preprocess` when no prompt is given; in batched mode
        # these get grouped into a list of `None`s, which `generate` cannot handle, so collapse them back here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
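# Minimal usage sketch (the checkpoint name is illustrative; any image-to-text checkpoint the pipeline
# supports should work, and `max_new_tokens` is forwarded to `generate` via `_sanitize_parameters`):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("http://images.cocodataset.org/val2017/000000039769.jpg", max_new_tokens=20)
#   # -> [{'generated_text': '...'}]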
"""simple docstring"""
from __future__ import annotations
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
if b == 0:
return (1, 0)
(A__) : Union[str, Any] =extended_euclid(UpperCamelCase , a % b )
A__ : Dict =a // b
return (y, x - k * y)
def lowercase ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
(A__) : Dict =extended_euclid(UpperCamelCase , UpperCamelCase )
A__ : Dict =na * na
A__ : Any =ra * x * na + ra * y * na
return (n % m + m) % m
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
(A__) : Any =extended_euclid(UpperCamelCase , UpperCamelCase )
if b < 0:
A__ : Tuple =(b % n + n) % n
return b
def lowercase ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
A__ : int =invert_modulo(UpperCamelCase , UpperCamelCase ), invert_modulo(UpperCamelCase , UpperCamelCase )
A__ : Any =na * na
A__ : Any =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this one, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__A : Any = "facebook/wmt19-en-de"
__A : List[str] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__A : Dict = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
__A : List[Any] = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__A : Tuple = tokenizer(["Making tiny model"], return_tensors="pt")
__A : int = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
__A : Any = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
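# Once uploaded, tests can consume the tiny checkpoint directly (sketch; assumes the upload
# above succeeded and the model lives at "stas/tiny-wmt19-en-de" as noted earlier):
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")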