Dataset columns (one row per example):

- code: string, 86 to 54.5k characters
- code_codestyle: int64, range 0 to 371
- style_context: string, 87 to 49.2k characters
- style_context_codestyle: int64, range 0 to 349
- label: int64, 0 or 1
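A minimal sketch of loading and inspecting rows with this schema, assuming the dump comes from a Hugging Face `datasets` repository; the dataset identifier below is a hypothetical placeholder, not the real name:

# A minimal loading sketch, assuming a `datasets`-style repository.
# "org/code-style-pairs" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")
for row in ds.select(range(3)):
    # each row pairs a code sample with a style context and a 0/1 label
    print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])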
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sol1 import PokerHand

SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A five-high straight must sort below a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls to _is_five_high_straight() must not mutate the hand.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: count how many times Player 1 wins
    # over the hands listed in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a : Dict = logging.get_logger(__name__) a : List[Any] = { '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''', } class __UpperCamelCase ( a__ ): lowerCamelCase : Tuple ="""open-llama""" def __init__( self , lowerCAmelCase__=10_0000 , lowerCAmelCase__=4096 , lowerCAmelCase__=1_1008 , lowerCAmelCase__=32 , lowerCAmelCase__=32 , lowerCAmelCase__="silu" , lowerCAmelCase__=2048 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple: a : Any = vocab_size a : List[str] = max_position_embeddings a : int = hidden_size a : str = intermediate_size a : List[str] = num_hidden_layers a : int = num_attention_heads a : Dict = hidden_act a : Union[str, Any] = initializer_range a : Tuple = rms_norm_eps a : Union[str, Any] = use_cache a : Union[str, Any] = kwargs.pop( "use_memorry_efficient_attention" , lowerCAmelCase__ ) a : int = hidden_dropout_prob a : Tuple = attention_dropout_prob a : Optional[Any] = use_stable_embedding a : str = shared_input_output_embedding a : str = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ , ) def __a ( self ) -> Union[str, Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) a : Any = self.rope_scaling.get("type" , lowerCAmelCase__ ) a : List[str] = self.rope_scaling.get("factor" , lowerCAmelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Returns the next number in the chain: the sum of the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Counts how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))

        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring""" import re def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" try: __A = split_input(__UpperCamelCase ) if upper: __A = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: __A = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" return to_simple_case(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" try: __A = to_simple_case(__UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''_''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return to_complex_case(__UpperCamelCase , __UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__('doctest').testmod()
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise
    # to generate masks during test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
79
'''simple docstring'''
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
79
1
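A quick usage sketch for the slowsort routine in the row above; it is illustrative only and assumes the repaired definition (named `slowsort`, sorting in place) is in scope.

data = [64, 25, 12, 22, 11]
slowsort(data)
print(data)  # [11, 12, 22, 25, 64]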
from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": ( """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json""" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowerCamelCase : Dict ="trajectory_transformer" lowerCamelCase : Dict =["past_key_values"] lowerCamelCase : Optional[int] ={ "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Any , lowerCAmelCase : str=1_00 , lowerCAmelCase : Union[str, Any]=5 , lowerCAmelCase : Dict=1 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=2_49 , lowerCAmelCase : Any=6 , lowerCAmelCase : List[Any]=17 , lowerCAmelCase : Tuple=25 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : int=4 , lowerCAmelCase : List[str]=1_28 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[Any]=0.0006 , lowerCAmelCase : Optional[int]=5_12 , lowerCAmelCase : str=0.02 , lowerCAmelCase : Dict=1e-12 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : int=True , lowerCAmelCase : List[str]=1 , lowerCAmelCase : List[str]=5_02_56 , lowerCAmelCase : Optional[Any]=5_02_56 , **lowerCAmelCase : Any , ) -> Dict: """simple docstring""" __lowerCAmelCase : List[str] = vocab_size __lowerCAmelCase : Tuple = action_weight __lowerCAmelCase : Optional[Any] = reward_weight __lowerCAmelCase : Tuple = value_weight __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : Optional[Any] = block_size __lowerCAmelCase : Dict = action_dim __lowerCAmelCase : Any = observation_dim __lowerCAmelCase : Dict = transition_dim __lowerCAmelCase : List[str] = learning_rate __lowerCAmelCase : Union[str, Any] = n_layer __lowerCAmelCase : List[Any] = n_head __lowerCAmelCase : Dict = n_embd __lowerCAmelCase : int = embd_pdrop __lowerCAmelCase : Tuple = attn_pdrop __lowerCAmelCase : int = resid_pdrop __lowerCAmelCase : List[str] = initializer_range __lowerCAmelCase : Any = layer_norm_eps __lowerCAmelCase : Union[str, Any] = kaiming_initializer_range __lowerCAmelCase : Optional[int] = use_cache super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
361
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any , lowerCAmelCase : str , lowerCAmelCase : List[Any]=13 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : int=False , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : Optional[int]=19 , lowerCAmelCase : Any=32 , lowerCAmelCase : int=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[str]=5_12 , lowerCAmelCase : Dict=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" __lowerCAmelCase : List[str] = parent __lowerCAmelCase : Tuple = batch_size __lowerCAmelCase : Union[str, Any] = seq_length __lowerCAmelCase : int = is_training __lowerCAmelCase : Dict = use_input_mask __lowerCAmelCase : Dict = use_token_type_ids __lowerCAmelCase : Optional[int] = use_labels __lowerCAmelCase : str = vocab_size __lowerCAmelCase : Optional[int] = hidden_size __lowerCAmelCase : Union[str, Any] = num_hidden_layers __lowerCAmelCase : Union[str, Any] = num_attention_heads __lowerCAmelCase : Tuple = intermediate_size __lowerCAmelCase : List[str] = hidden_act __lowerCAmelCase : Optional[int] = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : List[str] = max_position_embeddings __lowerCAmelCase : Any = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : List[str] = initializer_range __lowerCAmelCase : int = num_labels __lowerCAmelCase : Tuple = num_choices __lowerCAmelCase : str = scope def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: """simple docstring""" __lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : int = None if self.use_input_mask: __lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Any = None __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : Optional[int] = None if self.use_labels: __lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Tuple = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowerCAmelCase : Tuple = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCAmelCase , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , ) return config def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" __lowerCAmelCase : List[Any] = EsmForProteinFolding(config=lowerCAmelCase ).float() model.to(lowerCAmelCase ) model.eval() __lowerCAmelCase : Any = model(lowerCAmelCase , attention_mask=lowerCAmelCase ) __lowerCAmelCase : str = model(lowerCAmelCase ) __lowerCAmelCase : Optional[int] = model(lowerCAmelCase ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: """simple docstring""" __lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) ,( __lowerCAmelCase ) ,( __lowerCAmelCase ) ,( __lowerCAmelCase ) ,( __lowerCAmelCase ) ,( __lowerCAmelCase ) , ) : Optional[int] = config_and_inputs __lowerCAmelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Tuple =False lowerCamelCase : int =(EsmForProteinFolding,) if is_torch_available() else () lowerCamelCase : int =() lowerCamelCase : Dict ={} if is_torch_available() else {} lowerCamelCase : str =False def SCREAMING_SNAKE_CASE ( self : int ) -> Any: """simple docstring""" __lowerCAmelCase : Union[str, Any] = EsmFoldModelTester(self ) __lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : str ) -> int: """simple docstring""" __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) @unittest.skip("""Does not support attention outputs""" ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: """simple docstring""" pass @unittest.skip def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support passing input embeds!""" ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: """simple docstring""" pass @unittest.skip("""ESMFold does 
not support head pruning.""" ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: """simple docstring""" pass @unittest.skip("""ESMFold does not support head pruning.""" ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: """simple docstring""" pass @unittest.skip("""ESMFold does not output hidden states in the normal way.""" ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: """simple docstring""" pass @unittest.skip("""ESMfold does not output hidden states in the normal way.""" ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("""ESMFold only has one output format.""" ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold does not support input chunking.""" ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: """simple docstring""" pass @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support torchscript compilation.""" ) def SCREAMING_SNAKE_CASE ( self : int ) -> Any: """simple docstring""" pass @unittest.skip("""ESMFold doesn't support data parallel.""" ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @require_torch class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: """simple docstring""" __lowerCAmelCase : Dict = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float() model.eval() __lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowerCAmelCase : str = model(lowerCAmelCase )["""positions"""] __lowerCAmelCase : str = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCAmelCase , atol=1e-4 ) )
139
0
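For context, a minimal sketch of how the trajectory-transformer configuration above would typically be instantiated. The import path is an assumption: `TrajectoryTransformerConfig` was a top-level `transformers` export before the model family was moved under `models.deprecated`.

from transformers import TrajectoryTransformerConfig  # assumed export; relocated in newer releases

# Defaults mirror the __init__ signature above; override a few for a small model.
config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
print(config.model_type)   # "trajectory_transformer"
print(config.hidden_size)  # 128, resolved to n_embd via the attribute_map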
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance SCREAMING_SNAKE_CASE : List[str] = 6_378_137.0 SCREAMING_SNAKE_CASE : Tuple = 6_356_752.314_245 SCREAMING_SNAKE_CASE : Dict = 637_8137 def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float ) ->float: """simple docstring""" __snake_case : Any = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __snake_case : List[Any] = atan((1 - flattening) * tan(radians(_snake_case ) ) ) __snake_case : str = atan((1 - flattening) * tan(radians(_snake_case ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __snake_case : Union[str, Any] = haversine_distance(_snake_case , _snake_case , _snake_case , _snake_case ) / EQUATORIAL_RADIUS # Intermediate P and Q values __snake_case : Union[str, Any] = (b_lata + b_lata) / 2 __snake_case : Any = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __snake_case : List[str] = (sin(_snake_case ) ** 2) * (cos(_snake_case ) ** 2) __snake_case : Optional[Any] = cos(sigma / 2 ) ** 2 __snake_case : Optional[int] = (sigma - sin(_snake_case )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __snake_case : Optional[Any] = (cos(_snake_case ) ** 2) * (sin(_snake_case ) ** 2) __snake_case : List[Any] = sin(sigma / 2 ) ** 2 __snake_case : Optional[int] = (sigma + sin(_snake_case )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
102
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
79
0
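A hedged sanity check for the Lambert ellipsoidal-distance helper above. It assumes the collision-fixed function name and that the module's relative `haversine_distance` import resolves; the coordinates and the expected magnitude (San Francisco to New York, roughly 4.14e6 metres) are illustrative.

# Run from within the package that provides .haversine_distance.
sf_lat, sf_lon = 37.774856, -122.424227
ny_lat, ny_lon = 40.713019, -74.012647
print(lamberts_ellipsoidal_distance(sf_lat, sf_lon, ny_lat, ny_lon))  # ~4.14e6 m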
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __A : Any = logging.get_logger(__name__) def A_ ( snake_case_ : Dict ,snake_case_ : List[Any] ): '''simple docstring''' UpperCamelCase : List[str] = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append( (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def A_ ( snake_case_ : List[Any] ,snake_case_ : str ): '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) UpperCamelCase : Union[str, Any] = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' ) UpperCamelCase : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] UpperCamelCase : Union[str, Any] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] UpperCamelCase : Dict = in_proj_weight[ -encoder_config.hidden_size :, : ] def A_ ( snake_case_ : str ,snake_case_ : int ,snake_case_ : List[Any] ): '''simple docstring''' UpperCamelCase : Any = dct.pop(_UpperCamelCase ) UpperCamelCase : List[Any] = val def A_ ( snake_case_ : Tuple ): '''simple docstring''' if "handwritten" in checkpoint_url: UpperCamelCase : Optional[Any] = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: UpperCamelCase : Optional[Any] = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" UpperCamelCase : List[Any] = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ).convert("""RGB""" ) return im @torch.no_grad() def A_ ( snake_case_ : Dict ,snake_case_ : Any ): '''simple docstring''' UpperCamelCase : List[str] = ViTConfig(image_size=3_8_4 ,qkv_bias=_UpperCamelCase ) UpperCamelCase : Optional[int] = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: UpperCamelCase : List[str] = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder UpperCamelCase : Optional[Any] = 1_0_2_4 UpperCamelCase : Optional[Any] = 4_0_9_6 UpperCamelCase : List[Any] = 2_4 UpperCamelCase : Optional[Any] = 1_6 UpperCamelCase : Optional[Any] = 1_0_2_4 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: UpperCamelCase : Tuple = False UpperCamelCase : Optional[Any] = """relu""" UpperCamelCase : Union[str, Any] = 1_0_2_4 UpperCamelCase : Tuple = True UpperCamelCase : str = False UpperCamelCase : Optional[Any] = False # load HuggingFace model UpperCamelCase : int = ViTModel(_UpperCamelCase ,add_pooling_layer=_UpperCamelCase ) UpperCamelCase : Any = TrOCRForCausalLM(_UpperCamelCase ) UpperCamelCase : int = VisionEncoderDecoderModel(encoder=_UpperCamelCase ,decoder=_UpperCamelCase ) model.eval() # load state_dict of original model, rename some keys UpperCamelCase : List[str] = torch.hub.load_state_dict_from_url(_UpperCamelCase ,map_location="""cpu""" ,check_hash=_UpperCamelCase )["""model"""] UpperCamelCase : Any = create_rename_keys(_UpperCamelCase ,_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) read_in_q_k_v(_UpperCamelCase ,_UpperCamelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): UpperCamelCase : Optional[int] = state_dict.pop(_UpperCamelCase ) if key.startswith("""decoder""" ) and "output_projection" not in key: UpperCamelCase : Union[str, Any] = val else: UpperCamelCase : Any = val # load state dict model.load_state_dict(_UpperCamelCase ) # Check outputs on an image UpperCamelCase : Any = ViTImageProcessor(size=encoder_config.image_size ) UpperCamelCase : Optional[Any] = RobertaTokenizer.from_pretrained("""roberta-large""" ) UpperCamelCase : Dict = TrOCRProcessor(_UpperCamelCase ,_UpperCamelCase ) UpperCamelCase : Optional[int] = processor(images=prepare_img(_UpperCamelCase ) ,return_tensors="""pt""" ).pixel_values # verify logits UpperCamelCase : Dict = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) UpperCamelCase : Any = model(pixel_values=_UpperCamelCase ,decoder_input_ids=_UpperCamelCase ) UpperCamelCase : Any = outputs.logits UpperCamelCase : Optional[Any] = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: UpperCamelCase : List[Any] = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif 
"trocr-large-handwritten" in checkpoint_url: UpperCamelCase : str = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: UpperCamelCase : int = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: UpperCamelCase : Optional[int] = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] ,_UpperCamelCase ,atol=1e-3 ), "First elements of logits not as expected" Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCamelCase ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) __A : Union[str, Any] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
358
"""simple docstring""" import requests from bsa import BeautifulSoup def A_ ( snake_case_ : str = "https://www.worldometers.info/coronavirus" ): '''simple docstring''' UpperCamelCase : Any = BeautifulSoup(requests.get(snake_case_ ).text ,"""html.parser""" ) UpperCamelCase : Optional[int] = soup.findAll("""h1""" ) UpperCamelCase : List[Any] = soup.findAll("""div""" ,{"""class""": """maincounter-number"""} ) keys += soup.findAll("""span""" ,{"""class""": """panel-title"""} ) values += soup.findAll("""div""" ,{"""class""": """number-table-main"""} ) return {key.text.strip(): value.text.strip() for key, value in zip(snake_case_ ,snake_case_ )} if __name__ == "__main__": print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''') for key, value in world_covidaa_stats().items(): print(F'''{key}\n{value}\n''')
27
0
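The TrOCR conversion above repeatedly pops a tensor out of the checkpoint's state dict and reinserts it under the Hugging Face key. A minimal self-contained sketch of that pattern (the key names are taken from the rename list above; the helper mirrors the script's rename_key):

import torch

def rename_key(dct: dict, old: str, new: str) -> None:
    # Pop the tensor under its old key and reinsert it under the new one.
    dct[new] = dct.pop(old)

sd = {"encoder.deit.norm.weight": torch.ones(3)}
rename_key(sd, "encoder.deit.norm.weight", "encoder.layernorm.weight")
assert "encoder.layernorm.weight" in sd and "encoder.deit.norm.weight" not in sd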
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class lowerCamelCase : '''simple docstring''' __snake_case = PegasusConfig __snake_case = {} __snake_case = 'gelu' def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=13 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Optional[int]=99 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Dict=37 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Any=40 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : int=0 , ) -> List[Any]: '''simple docstring''' A__ : List[str] =parent A__ : Union[str, Any] =batch_size A__ : List[Any] =seq_length A__ : Tuple =is_training A__ : Any =use_labels A__ : List[str] =vocab_size A__ : int =hidden_size A__ : List[Any] =num_hidden_layers A__ : Optional[int] =num_attention_heads A__ : Any =intermediate_size A__ : str =hidden_dropout_prob A__ : List[str] =attention_probs_dropout_prob A__ : Any =max_position_embeddings A__ : Tuple =eos_token_id A__ : Dict =pad_token_id A__ : Optional[int] =bos_token_id def lowercase__ ( self : Optional[int] ) -> int: '''simple docstring''' A__ : str =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A__ : List[Any] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A__ : List[str] =tf.concat([input_ids, eos_tensor] , axis=1 ) A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : List[Any] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A__ : List[Any] =prepare_pegasus_inputs_dict(__snake_case , __snake_case , __snake_case ) return config, inputs_dict def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> Optional[int]: '''simple docstring''' A__ : Union[str, Any] =TFPegasusModel(config=__snake_case ).get_decoder() A__ : Union[str, Any] =inputs_dict["""input_ids"""] A__ : Union[str, Any] =input_ids[:1, :] A__ : Optional[int] =inputs_dict["""attention_mask"""][:1, :] A__ : int =inputs_dict["""head_mask"""] A__ : Tuple =1 # first forward pass A__ : Dict =model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case ) A__ , A__ : List[Any] =outputs.to_tuple() # 
create hypothetical next token and extent to next_input_ids A__ : str =ids_tensor((self.batch_size, 3) , config.vocab_size ) A__ : Union[str, Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A__ : Optional[int] =tf.concat([input_ids, next_tokens] , axis=-1 ) A__ : List[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A__ : Tuple =model(__snake_case , attention_mask=__snake_case )[0] A__ : Any =model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A__ : Optional[int] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A__ : Optional[int] =output_from_no_past[:, -3:, random_slice_idx] A__ : List[str] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__snake_case , __snake_case , rtol=1e-3 ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : int, __snake_case : Union[str, Any], __snake_case : str=None, __snake_case : List[str]=None, __snake_case : Tuple=None, __snake_case : Union[str, Any]=None, __snake_case : Optional[int]=None, ) -> Optional[int]: """simple docstring""" if attention_mask is None: A__ : int =tf.cast(tf.math.not_equal(UpperCamelCase_, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: A__ : Tuple =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: A__ : Dict =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ : Any =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A__ : Union[str, Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase ): '''simple docstring''' __snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __snake_case = ( { 'conversational': TFPegasusForConditionalGeneration, 'feature-extraction': TFPegasusModel, 'summarization': TFPegasusForConditionalGeneration, 'text2text-generation': TFPegasusForConditionalGeneration, 'translation': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __snake_case = True __snake_case = False __snake_case = False def lowercase__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' A__ : List[Any] =TFPegasusModelTester(self ) A__ : Dict =ConfigTester(self , config_class=__snake_case ) def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : Dict ) -> List[str]: '''simple docstring''' A__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__snake_case ) @require_sentencepiece @require_tokenizers @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' __snake_case = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high 
winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
', ] __snake_case = [ 'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to' ' reduce the risk of wildfires.', 'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.', ] # differs slightly from pytorch, likely due to numerical differences in linear layers __snake_case = 'google/pegasus-xsum' @cached_property def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowercase__ ( self : Optional[int] ) -> Any: '''simple docstring''' A__ : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowercase__ ( self : Optional[int] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =self.translate_src_text(**__snake_case ) assert self.expected_text == generated_words def lowercase__ ( self : List[str] , **lowerCAmelCase_ : List[Any] ) -> Optional[int]: '''simple docstring''' A__ : Tuple =self.tokenizer(self.src_text , **__snake_case , padding=__snake_case , return_tensors="""tf""" ) A__ : str =self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__snake_case , ) A__ : Any =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case ) return generated_words @slow def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' self._assert_generated_batch_equal_expected()
134
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
127
0
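A compact, self-contained restatement of the backtracking step used by the N-queens solver above, checking only the rows already filled; for a 4x4 board it finds the two classic solutions.

def solve(board: list, row: int, out: list) -> None:
    n = len(board)
    if row == n:
        out.append([r[:] for r in board])  # record a full placement
        return
    for col in range(n):
        safe = (
            all(board[r][col] == 0 for r in range(row))  # same column above
            and all(board[row - d][col - d] == 0 for d in range(1, min(row, col) + 1))  # upper-left diagonal
            and all(board[row - d][col + d] == 0 for d in range(1, min(row, n - 1 - col) + 1))  # upper-right diagonal
        )
        if safe:
            board[row][col] = 1
            solve(board, row + 1, out)
            board[row][col] = 0  # backtrack

solutions: list = []
solve([[0] * 4 for _ in range(4)], 0, solutions)
print(len(solutions))  # 2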
'''simple docstring'''
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    ud_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                ud_diag_product = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, ud_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + '/grid.txt') as file:
        for line in file:
            grid.append(line.strip('\n').split(' '))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
190
'''simple docstring'''
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
190
1
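A quick self-contained sanity check of the 6k +/- 1 trial-division test that the spiral-diagonal solution above relies on:

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # every prime > 3 has the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]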
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __UpperCAmelCase ="base_with_context" def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): __lowerCamelCase = weights[f"""layers_{lyr_num}"""] __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __lowerCamelCase = ly_weight['''attention'''] __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): __lowerCamelCase = weights[f"""layers_{lyr_num}"""] __lowerCamelCase = ly_weight['''attention'''] __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: __lowerCamelCase = 
    nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
    __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )

    __lowerCamelCase = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ )

    __lowerCamelCase = nn.Parameter(
        torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )

    for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"""layers_{lyr_num}"""]
        __lowerCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
        __lowerCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )

        attention_weights = ly_weight['''self_attention''']
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )

        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )

        __lowerCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        __lowerCamelCase = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __lowerCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )

    __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
    __lowerCamelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )

    return model


def main(args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )

    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]

    gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )

    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] ,
        vocab_size=synth_model.model.module.config.vocab_size ,
        d_model=synth_model.model.module.config.emb_dim ,
        dropout_rate=synth_model.model.module.config.dropout_rate ,
        num_layers=synth_model.model.module.config.num_encoder_layers ,
        num_heads=synth_model.model.module.config.num_heads ,
        d_kv=synth_model.model.module.config.head_dim ,
        d_ff=synth_model.model.module.config.mlp_dim ,
        feed_forward_proj='''gated-gelu''' ,
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims ,
        targets_context_length=synth_model.sequence_length['''targets_context'''] ,
        d_model=synth_model.model.module.config.emb_dim ,
        dropout_rate=synth_model.model.module.config.dropout_rate ,
        num_layers=synth_model.model.module.config.num_encoder_layers ,
        num_heads=synth_model.model.module.config.num_heads ,
        d_kv=synth_model.model.module.config.head_dim ,
        d_ff=synth_model.model.module.config.mlp_dim ,
        feed_forward_proj='''gated-gelu''' ,
    )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims ,
        targets_length=synth_model.sequence_length['''targets_context'''] ,
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time ,
        d_model=synth_model.model.module.config.emb_dim ,
        num_layers=synth_model.model.module.config.num_decoder_layers ,
        num_heads=synth_model.model.module.config.num_heads ,
        d_kv=synth_model.model.module.config.head_dim ,
        d_ff=synth_model.model.module.config.mlp_dim ,
        dropout_rate=synth_model.model.module.config.dropout_rate ,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )

    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder ,
        continuous_encoder=continuous_encoder ,
        decoder=decoder ,
        scheduler=scheduler ,
        melgan=melgan ,
    )
    if args.save:
        pipe.save_pretrained(args.output_path )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f'{MODEL}/checkpoint_500000',
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
67
"""simple docstring""" import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if is_torch_version('<' , '2.0.0' ) or not hasattr(UpperCamelCase__ , '_dynamo' ): return False return isinstance(UpperCamelCase__ , torch._dynamo.eval_frame.OptimizedModule ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = True ): """simple docstring""" A__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) A__ = is_compiled_module(UpperCamelCase__ ) if is_compiled: A__ = model A__ = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ = model.module if not keep_fpaa_wrapper: A__ = getattr(UpperCamelCase__ , 'forward' ) A__ = model.__dict__.pop('_original_forward' , UpperCamelCase__ ) if original_forward is not None: while hasattr(UpperCamelCase__ , '__wrapped__' ): A__ = forward.__wrapped__ if forward == original_forward: break A__ = forward if getattr(UpperCamelCase__ , '_converted_to_transformer_engine' , UpperCamelCase__ ): convert_model(UpperCamelCase__ , to_transformer_engine=UpperCamelCase__ ) if is_compiled: A__ = model A__ = compiled_model return model def UpperCAmelCase ( ): """simple docstring""" PartialState().wait_for_everyone() def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(UpperCamelCase__ , UpperCamelCase__ ) elif PartialState().local_process_index == 0: torch.save(UpperCamelCase__ , UpperCamelCase__ ) @contextmanager def UpperCAmelCase ( **UpperCamelCase__ ): """simple docstring""" for key, value in kwargs.items(): A__ = str(UpperCamelCase__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if not hasattr(UpperCamelCase__ , '__qualname__' ) and not hasattr(UpperCamelCase__ , '__name__' ): A__ = getattr(UpperCamelCase__ , '__class__' , UpperCamelCase__ ) if hasattr(UpperCamelCase__ , '__qualname__' ): return obj.__qualname__ if hasattr(UpperCamelCase__ , '__name__' ): return obj.__name__ return str(UpperCamelCase__ ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" for key, value in source.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ = destination.setdefault(UpperCamelCase__ , {} ) merge_dicts(UpperCamelCase__ , UpperCamelCase__ ) else: A__ = value return destination def UpperCAmelCase ( UpperCamelCase__ = None ): """simple docstring""" if port is None: A__ = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
221
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vit_msn'''] = [
        '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMSNModel''',
        '''ViTMSNForImageClassification''',
        '''ViTMSNPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
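# A minimal usage sketch, assuming this record is transformers/models/vit_msn/__init__.py in an
# installed transformers tree: the lazy module defers the torch-backed imports until first use.
if __name__ == "__main__":
    from transformers import ViTMSNConfig  # assumes transformers (with torch) is installed

    print(ViTMSNConfig().model_type )  # 'vit_msn'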
103
import re


def UpperCamelCase ( dna: str ) -> str:
    # Validate that the strand contains only the four bases before translating.
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )

    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
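if __name__ == "__main__":
    # A minimal usage sketch: each base is mapped to its complement (A<->T, C<->G).
    print(UpperCamelCase("ATCG" ) )  # TAGC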
103
1
def lowerCAmelCase_ ( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
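if __name__ == "__main__":
    # A minimal usage sketch: the recurrence generates the Catalan numbers.
    print([lowerCAmelCase_(n ) for n in range(1 , 6 )] )  # [1, 1, 2, 5, 14]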
170
'''simple docstring'''
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig ):
    _A : int = ComputeEnvironment.AMAZON_SAGEMAKER
    _A : List[Any] = True
    _A : Dict = '''ml.p3.2xlarge'''
    _A : Any = '''accelerate_sagemaker_execution_role'''
    _A : Union[str, Any] = '''hf-sm'''
    _A : Dict = '''us-east-1'''
    _A : List[Any] = 1
    _A : Union[str, Any] = '''accelerate-sagemaker-1'''
    _A : List[Any] = '''1.6'''
    _A : Optional[Any] = '''4.4'''
    _A : Any = '''train.py'''
    success_training_script_args = [
        '''--model_name_or_path''',
        '''bert''',
        '''--do_train''',
        '''False''',
        '''--epochs''',
        '''3''',
        '''--learning_rate''',
        '''5e-5''',
        '''--max_steps''',
        '''50.5''',
    ]
    fail_training_script_args = [
        '''--model_name_or_path''',
        '''bert''',
        '''--do_train''',
        '''--do_test''',
        '''False''',
        '''--do_predict''',
        '''--epochs''',
        '''3''',
        '''--learning_rate''',
        '''5e-5''',
        '''--max_steps''',
        '''50.5''',
    ]


class _snake_case(unittest.TestCase ):
    def __UpperCamelCase ( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args["model_name_or_path"] , str )
        assert isinstance(converted_args["do_train"] , bool )
        assert isinstance(converted_args["epochs"] , int )
        assert isinstance(converted_args["learning_rate"] , float )
        assert isinstance(converted_args["max_steps"] , float )

        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
139
0
'''simple docstring'''
from math import factorial


class Dual:
    def __init__( self , real , rank ):
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__( self ):
        '''simple docstring'''
        return (
            f"""{self.real}+"""
            f"""{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )

    def reduce( self ):
        '''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )

    def __add__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )

        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )

        return Dual(self.real + other.real , new_duals )

    __radd__ = __add__

    def __sub__( self , other ):
        '''simple docstring'''
        return self + other * -1

    def __mul__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )

        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )

    __rmul__ = __mul__

    def __truediv__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError

    def __floordiv__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError

    def __pow__( self , n ):
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
            raise ValueError('power must be a positive integer' )
        if n == 0:
            return 1
        if n == 1:
            return self

        x = self
        for _ in range(n - 1 ):
            x *= self
        return x


def differentiate(func , position , order ):
    if not callable(func ):
        raise ValueError('differentiate() requires a function as input for func' )
    if not isinstance(position , (float, int) ):
        raise ValueError('differentiate() requires a float as input for position' )
    if not isinstance(order , int ):
        raise ValueError('differentiate() requires an int as input for order' )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y ):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
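# Sanity note on the demo above: f(y) = y**2 * y**4 = y**6, whose second derivative is
# 30 * y**4, so differentiate(f, 9, 2) prints 30 * 9**4 = 196830.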
366
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
    re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
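if __name__ == "__main__":
    # A minimal usage sketch (meaningful only when nltk is installed):
    if NLTK_AVAILABLE:
        print(lowerCamelCase('First sentence. Second sentence.' ) )
        # -> 'First sentence.\nSecond sentence.'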
294
0
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_model_doc_toc(model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )

    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )


def check_model_doc(overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
30
'''simple docstring'''
import requests


APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = "Chicago" , appid: str = APPID ):
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()


def weather_forecast(q: str = "Kolkata, India" , appid: str = APPID ):
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()


def weather_onecall(lat: float = 5_5.6_8 , lon: float = 1_2.5_7 , appid: str = APPID ):
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
27
0
'''simple docstring'''
def fizz_buzz(number: int , iterations: int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
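if __name__ == "__main__":
    # A minimal usage sketch of the classic game: multiples of 3 -> Fizz, of 5 -> Buzz,
    # of both -> FizzBuzz.
    print(fizz_buzz(1 , 15 ) )  # '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '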
17
'''simple docstring'''
import base64


def baseaa_encode(string: str ) -> bytes:
    return base64.b85encode(string.encode("utf-8" ) )


def baseaa_decode(encoded: bytes ) -> str:
    return base64.b85decode(encoded ).decode("utf-8" )


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)

    decoded = baseaa_decode(encoded)
    print(decoded)
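if __name__ == "__main__":
    # A minimal round-trip sketch on top of the demo above.
    assert baseaa_decode(baseaa_encode("accelerate" ) ) == "accelerate"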
17
1
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class SCREAMING_SNAKE_CASE (PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    lowerCAmelCase = StableDiffusionSAGPipeline
    lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
    lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase = False

    def get_dummy_components( self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,
            cross_attention_dim=32 ,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085 ,
            beta_end=0.012 ,
            beta_schedule='scaled_linear' ,
            clip_sample=False ,
            set_alpha_to_one=False ,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,
            latent_channels=4 ,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1e-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1000 ,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs( self , device , seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_batch_single_identical( self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def tearDown( self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1( self):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2( self):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square( self):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] ,
            width=768 ,
            height=512 ,
            generator=generator ,
            guidance_scale=7.5 ,
            sag_scale=1.0 ,
            num_inference_steps=20 ,
            output_type='np' ,
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
190
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
    '''processing_git''': ['''GitProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_git'''] = [
        '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GitForCausalLM''',
        '''GitModel''',
        '''GitPreTrainedModel''',
        '''GitVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
190
1
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Union[str, Any] = BartphoTokenizer a_ : Optional[Any] = False a_ : Dict = True def UpperCAmelCase__ ( self) ->Optional[Any]: super().setUp() a_ = ["▁This", "▁is", "▁a", "▁t", "est"] a_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase)))) a_ = {"unk_token": "<unk>"} a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp: for token in vocab_tokens: fp.write(F'''{token} {vocab_tokens[token]}\n''') a_ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Tuple: kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]: a_ = "This is a là test" a_ = "This is a<unk><unk> test" return input_text, output_text def UpperCAmelCase__ ( self) ->int: a_ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map) a_ = "This is a là test" a_ = "▁This ▁is ▁a ▁l à ▁t est".split() a_ = tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a_ = tokens + [tokenizer.unk_token] a_ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase)
350
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]: """simple docstring""" a_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a_ = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } a_ = F'''{src_lang}-{tgt_lang}''' a_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase ) a_ = os.path.join(UpperCAmelCase , "README.md" ) print(F'''Generating {path}''' ) with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(UpperCAmelCase ) # make sure we are under the root of the project UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent UpperCamelCase_ = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: UpperCamelCase_ = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
303
0
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''module''']

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['''[MASK2]'''] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2

    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''r''' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['''tokenizer_class'''] = '''MLukeTokenizer'''
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(tokenizer_config , f )

    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['''#'''] )[0]

    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_mask_emb = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_embeddings.entity_embeddings.weight'''] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['''entity_predictions.bias''']
    entity_mask_bias = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_predictions.bias'''] = torch.cat([entity_prediction_bias, entity_mask_bias] )

    model = LukeForMaskedLM(config=config ).eval()

    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            new_state_dict[f"""luke.{key}"""] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )

    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )

    text = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )

    outputs = model(**encoding )

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = '''Tokyo is the capital of <mask>.'''
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )

    outputs = model(**encoding )

    input_ids = encoding['''input_ids'''][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )


def load_original_entity_vocab(entity_vocab_path ):
    SPECIAL_TOKENS = ['''[MASK]''', '''[PAD]''', '''[UNK]''']

    data = [json.loads(line ) for line in open(entity_vocab_path )]

    new_mapping = {}
    for entry in data:
        entity_id = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"""{language}:{entity_name}"""
            new_mapping[new_entity_name] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
103
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

if is_vision_available():
    from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskaFormerModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=2 ,
        is_training=True ,
        use_auxiliary_loss=False ,
        num_queries=1_0 ,
        num_channels=3 ,
        min_size=3_2 * 8 ,
        max_size=3_2 * 8 ,
        num_labels=4 ,
        hidden_dim=6_4 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config( self ):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 6_4
        config.dim_feedforward = 1_2_8
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common( self ):
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )

    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskaFormerModel(config=config )
            model.to(torch_device )
            model.eval()

            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,
            (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(output , config )

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskaFormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()

        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )

            comm_check_on_output(result )

            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )

            comm_check_on_output(result )

        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
class __snake_case (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}

    _a = False
    _a = False
    _a = False
    _a = False

    def setUp( self ):
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )

    def UpperCAmelCase__ ( self : Optional[int]):
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : Optional[Any]):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )

    def UpperCAmelCase__ ( self : List[Any]):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )

    @unittest.skip(reason='''Mask2Former does not use inputs_embeds''')
    def UpperCAmelCase__ ( self : Optional[Any]):
        pass

    @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''')
    def UpperCAmelCase__ ( self : str):
        pass

    @unittest.skip(reason='''Mask2Former is not a generative model''')
    def UpperCAmelCase__ ( self : int):
        pass

    @unittest.skip(reason='''Mask2Former does not use token embeddings''')
    def UpperCAmelCase__ ( self : Tuple):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def UpperCAmelCase__ ( self : List[str]):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def UpperCAmelCase__ ( self : Tuple):
        pass

    def UpperCAmelCase__ ( self : Dict):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    @slow
    def UpperCAmelCase__ ( self : List[str]):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def UpperCAmelCase__ ( self : Optional[Any]):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=torch_device ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=torch_device ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=torch_device ).long(),
        }

        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )

    def UpperCAmelCase__ ( self : Tuple):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=True )

    def UpperCAmelCase__ ( self : Union[str, Any]):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )

    def UpperCAmelCase__ ( self : List[Any]):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config )
        model.to(torch_device )
        model.train()

        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()

    def UpperCAmelCase__ ( self : str):
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config ).to(torch_device )
        model.train()

        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_vision
@slow
class __snake_case (unittest.TestCase ):
    @cached_property
    def model_checkpoints( self ):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor( self ):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def UpperCAmelCase__ ( self : int):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='''pt''').to(torch_device )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_8_4, 3_8_4) )

        with torch.no_grad():
            outputs = model(**inputs )

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )

    def UpperCAmelCase__ ( self : List[Any]):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='''pt''').to(torch_device )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_8_4, 3_8_4) )

        with torch.no_grad():
            outputs = model(**inputs )

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )

    def UpperCAmelCase__ ( self : Any):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] ,
            segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.float32 ), np.zeros((3_8_4, 3_8_4) ).astype(np.float32 )] ,
            return_tensors='''pt''' , )

        inputs['''pixel_values'''] = inputs['''pixel_values'''].to(torch_device )
        inputs['''mask_labels'''] = [el.to(torch_device ) for el in inputs['''mask_labels''']]
        inputs['''class_labels'''] = [el.to(torch_device ) for el in inputs['''class_labels''']]

        with torch.no_grad():
            outputs = model(**inputs )

        self.assertTrue(outputs.loss is not None )
103
1
def _A ( txt: str ) -> list:
    # Produce every variant of `txt` with exactly one alphabetic character upper-cased.
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__('doctest').testmod()
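if __name__ == "__main__":
    # A minimal usage sketch: one upper-cased variant per alphabetic position.
    print(_A("hey" ) )  # ['Hey', 'hEy', 'heY']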
356
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


_snake_case = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead." ,
            FutureWarning , )
        super().__init__(*args , **kwargs )
199
0
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json", } class UpperCAmelCase_ ( snake_case_ ): lowercase__ = '''mvp''' lowercase__ = ['''past_key_values'''] lowercase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , snake_case_ : List[str]=50_267 , snake_case_ : Optional[Any]=1_024 , snake_case_ : Tuple=12 , snake_case_ : Optional[Any]=4_096 , snake_case_ : int=16 , snake_case_ : Tuple=12 , snake_case_ : int=4_096 , snake_case_ : List[Any]=16 , snake_case_ : Tuple=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : Tuple="gelu" , snake_case_ : Union[str, Any]=1_024 , snake_case_ : List[str]=0.1 , snake_case_ : Any=0.0 , snake_case_ : Dict=0.0 , snake_case_ : Tuple=0.02 , snake_case_ : Tuple=0.0 , snake_case_ : Optional[Any]=False , snake_case_ : int=True , snake_case_ : Tuple=1 , snake_case_ : Dict=0 , snake_case_ : Union[str, Any]=2 , snake_case_ : Optional[int]=True , snake_case_ : Tuple=2 , snake_case_ : Any=2 , snake_case_ : Optional[Any]=False , snake_case_ : Dict=100 , snake_case_ : Union[str, Any]=800 , **snake_case_ : Dict , ) -> List[Any]: '''simple docstring''' A__ = vocab_size A__ = max_position_embeddings A__ = d_model A__ = encoder_ffn_dim A__ = encoder_layers A__ = encoder_attention_heads A__ = decoder_ffn_dim A__ = decoder_layers A__ = decoder_attention_heads A__ = dropout A__ = attention_dropout A__ = activation_dropout A__ = activation_function A__ = init_std A__ = encoder_layerdrop A__ = decoder_layerdrop A__ = classifier_dropout A__ = use_cache A__ = encoder_layers A__ = scale_embedding # scale factor will be sqrt(d_model) if True A__ = use_prompt A__ = prompt_length A__ = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCAmelCase__ ): A__ = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." )
247
"""simple docstring""" _snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}] _snake_case = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
294
0
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class A ( unittest.TestCase ): def __init__(self : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=7 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Optional[Any]=1_8 , __UpperCAmelCase : Optional[Any]=3_0 , __UpperCAmelCase : Tuple=4_0_0 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : int=None , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=[0.5, 0.5, 0.5] , __UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , __UpperCAmelCase : Union[str, Any]=False , ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8} UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = image_size UpperCAmelCase__ = min_resolution UpperCAmelCase__ = max_resolution UpperCAmelCase__ = do_resize UpperCAmelCase__ = size UpperCAmelCase__ = do_center_crop UpperCAmelCase__ = crop_size UpperCAmelCase__ = do_normalize UpperCAmelCase__ = image_mean UpperCAmelCase__ = image_std UpperCAmelCase__ = do_reduce_labels def lowercase_ (self : Optional[Any] ) -> List[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase__ = load_dataset("hf-internal-testing/fixtures_ade20k", split="test" ) UpperCAmelCase__ = Image.open(dataset[0]["file"] ) UpperCAmelCase__ = Image.open(dataset[1]["file"] ) return image, map def lowerCAmelCase_ ( ) -> List[str]: '''simple docstring''' UpperCAmelCase__ = load_dataset("hf-internal-testing/fixtures_ade20k", split="test" ) UpperCAmelCase__ = Image.open(ds[0]["file"] ) UpperCAmelCase__ = Image.open(ds[1]["file"] ) UpperCAmelCase__ = Image.open(ds[2]["file"] ) UpperCAmelCase__ = Image.open(ds[3]["file"] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class A ( __a , unittest.TestCase ): __UpperCAmelCase : Optional[Any] = BeitImageProcessor if is_vision_available() else None def lowercase_ (self : Dict ) -> List[str]: """simple docstring""" UpperCAmelCase__ = BeitImageProcessingTester(self ) @property def lowercase_ (self : Dict ) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ (self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a__ , "do_resize" ) ) self.assertTrue(hasattr(a__ , "size" ) ) self.assertTrue(hasattr(a__ , "do_center_crop" ) ) self.assertTrue(hasattr(a__ , "center_crop" ) ) self.assertTrue(hasattr(a__ , "do_normalize" ) ) self.assertTrue(hasattr(a__ , "image_mean" ) ) self.assertTrue(hasattr(a__ , "image_std" ) ) def lowercase_ (self : 
int ) -> Any: """simple docstring""" UpperCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 2_0, "width": 2_0} ) self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} ) self.assertEqual(image_processor.do_reduce_labels , a__ ) UpperCAmelCase__ = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=a__ ) self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} ) self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} ) self.assertEqual(image_processor.do_reduce_labels , a__ ) def lowercase_ (self : Optional[Any] ) -> Dict: """simple docstring""" pass def lowercase_ (self : Dict ) -> int: """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ ) for image in image_inputs: self.assertIsInstance(a__ , Image.Image ) # Test not batched input UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCAmelCase__ = image_processing(a__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase_ (self : Optional[int] ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , np.ndarray ) # Test not batched input UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCAmelCase__ = image_processing(a__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase_ (self : List[str] ) -> int: """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ ) for image in image_inputs: self.assertIsInstance(a__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched UpperCAmelCase__ = image_processing(a__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase_ (self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ ) UpperCAmelCase__ = [] for image in image_inputs: self.assertIsInstance(a__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase__ = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_5_5 ) # Test batched UpperCAmelCase__ = image_processing(a__ , a__ , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_5_5 ) # Test not batched input (PIL images) UpperCAmelCase__ , UpperCAmelCase__ = prepare_semantic_single_inputs() UpperCAmelCase__ = image_processing(a__ , a__ , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_5_5 ) # Test batched input (PIL images) UpperCAmelCase__ , UpperCAmelCase__ = prepare_semantic_batch_inputs() UpperCAmelCase__ = image_processing(a__ , a__ , return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual( encoding["labels"].shape , ( 2, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) self.assertEqual(encoding["labels"].dtype , torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_5_5 ) def lowercase_ (self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 
UpperCAmelCase__ , UpperCAmelCase__ = prepare_semantic_single_inputs() UpperCAmelCase__ = image_processing(a__ , a__ , return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 1_5_0 ) UpperCAmelCase__ = True UpperCAmelCase__ = image_processing(a__ , a__ , return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
360
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def get_config_parser ( subparsers=None ): '''simple docstring''' parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False ) # The main config parser config_parser = config_command_parser(subparsers ) # The subparser to add commands to subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand" ) # Then add other parsers with the parent parser default_command_parser(subcommands, parents=[parent_parser] ) update_command_parser(subcommands, parents=[parent_parser] ) return config_parser def main ( ): '''simple docstring''' config_parser = get_config_parser() args = config_parser.parse_args() if not hasattr(args, "func" ): config_parser.print_help() exit(1 ) # Run args.func(args ) if __name__ == "__main__": main()
143
0
"""simple docstring""" def _A ( UpperCamelCase_ : int, UpperCamelCase_ : int) -> str: '''simple docstring''' if not isinstance(UpperCamelCase_, UpperCamelCase_): raise ValueError("iterations must be defined as integers") if not isinstance(UpperCamelCase_, UpperCamelCase_) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0") if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz") __lowercase = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(UpperCamelCase_) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
17
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _a = '__DUMMY_TRANSFORMERS_USER__' _a = 'Dummy User' _a = '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' _a = 'https://hub-ci.huggingface.co' _a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' _a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' _a = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _A ( UpperCamelCase_ : List[Any]) -> Tuple: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : int) -> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]: '''simple docstring''' HfFolder.save_token(UpperCamelCase_) yield HfFolder.delete_token() @pytest.fixture(scope="session") def _A ( ) -> List[Any]: '''simple docstring''' return HfApi(endpoint=UpperCamelCase_) @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi) -> List[Any]: '''simple docstring''' __lowercase = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' def _cleanup_repo(UpperCamelCase_ : Optional[int]): hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") return _cleanup_repo @pytest.fixture def _A ( UpperCamelCase_ : str) -> Any: '''simple docstring''' @contextmanager def _temporary_repo(UpperCamelCase_ : Any): try: yield repo_id finally: cleanup_repo(UpperCamelCase_) return _temporary_repo @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int: '''simple docstring''' __lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", 
) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
17
1
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def a__ ( __SCREAMING_SNAKE_CASE ) -> int: __lowerCAmelCase: Optional[int] = FileLock(str(tmpdir / "foo.lock" ) ) __lowerCAmelCase: List[str] = FileLock(str(tmpdir / "foo.lock" ) ) __lowerCAmelCase: Optional[Any] = 0.01 with locka.acquire(): with pytest.raises(__SCREAMING_SNAKE_CASE ): __lowerCAmelCase: int = time.time() locka.acquire(__SCREAMING_SNAKE_CASE ) assert time.time() - _start > timeout def a__ ( __SCREAMING_SNAKE_CASE ) -> Optional[int]: __lowerCAmelCase: str = "a" * 1_0_0_0 + ".lock" __lowerCAmelCase: Dict = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(__SCREAMING_SNAKE_CASE ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 __lowerCAmelCase: Optional[Any] = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__SCREAMING_SNAKE_CASE ): locka.acquire(0 )
108
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class snake_case ( unittest.TestCase ): def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=1_8 , UpperCamelCase__ : List[Any]=3_0 , UpperCamelCase__ : List[str]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=[0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ : str=[0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ : List[str]=True , )-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: Dict = size if size is not None else {"height": 2_2_4, "width": 2_2_4} __lowerCAmelCase: Union[str, Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8} __lowerCAmelCase: Optional[int] = parent __lowerCAmelCase: List[str] = batch_size __lowerCAmelCase: Union[str, Any] = num_channels __lowerCAmelCase: Optional[Any] = image_size __lowerCAmelCase: Tuple = min_resolution __lowerCAmelCase: List[str] = max_resolution __lowerCAmelCase: List[Any] = do_resize __lowerCAmelCase: Union[str, Any] = size __lowerCAmelCase: List[Any] = do_center_crop __lowerCAmelCase: Optional[int] = crop_size __lowerCAmelCase: Dict = do_normalize __lowerCAmelCase: List[str] = image_mean __lowerCAmelCase: Optional[int] = image_std __lowerCAmelCase: str = do_convert_rgb def lowercase_ ( self : Tuple)-> str: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowercase_ ( self : Any , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Dict=False)-> List[str]: '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __lowerCAmelCase: Optional[int] = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: __lowerCAmelCase: List[str] = [] for i in range(self.batch_size): __lowerCAmelCase , __lowerCAmelCase: List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __lowerCAmelCase: Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1)) for x in image_inputs] if torchify: __lowerCAmelCase: str = [torch.from_numpy(UpperCamelCase__) for x in image_inputs] return image_inputs @require_torch @require_vision class snake_case ( __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : str = ChineseCLIPImageProcessor if is_vision_available() else None def lowercase_ ( self : Any)-> List[Any]: '''simple docstring''' __lowerCAmelCase: 
Tuple = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase__) @property def lowercase_ ( self : Any)-> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : Union[str, Any])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase__ , "size")) self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop")) self.assertTrue(hasattr(UpperCamelCase__ , "center_crop")) self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase__ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase__ , "image_std")) self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb")) def lowercase_ ( self : List[Any])-> str: '''simple docstring''' __lowerCAmelCase: Tuple = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4}) self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8}) __lowerCAmelCase: List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4) self.assertEqual(image_processor.size , {"shortest_edge": 4_2}) self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4}) def lowercase_ ( self : List[str])-> Optional[int]: '''simple docstring''' pass def lowercase_ ( self : Any)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCAmelCase: Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image) # Test not batched input __lowerCAmelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase: int = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowercase_ ( self : int)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCAmelCase: List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray) # Test not batched input __lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase: Any = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], 
self.image_processor_tester.crop_size["width"], ) , ) def lowercase_ ( self : int)-> str: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCAmelCase: Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor) # Test not batched input __lowerCAmelCase: Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class snake_case ( __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[str] = ChineseCLIPImageProcessor if is_vision_available() else None def lowercase_ ( self : int)-> Dict: '''simple docstring''' __lowerCAmelCase: Optional[int] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = 3 @property def lowercase_ ( self : Union[str, Any])-> Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : int)-> str: '''simple docstring''' __lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase__ , "size")) self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop")) self.assertTrue(hasattr(UpperCamelCase__ , "center_crop")) self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase__ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase__ , "image_std")) self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb")) def lowercase_ ( self : Tuple)-> Any: '''simple docstring''' pass def lowercase_ ( self : Tuple)-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: Any = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCAmelCase: int = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image) # Test not batched input __lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched __lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
108
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase ( metaclass=lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = ['note_seq'] def __init__(self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _a (cls , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _a (cls , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" requires_backends(cls , ["""note_seq"""] )
171
def solution ( snake_case = 1_000_000 ): """simple docstring""" largest_number = 1 pre_counter = 1 counters = {1: 1} for inputa in range(2 , snake_case ): counter = 0 number = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: number = (3 * number) + 1 counter += 1 if inputa not in counters: counters[inputa] = counter if counter > pre_counter: largest_number = inputa pre_counter = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
303
0
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer _A = logging.get_logger(__name__) _A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} _A = { """vocab_file""": { """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json""" }, """merges_file""": { """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt""" }, } _A = {"""allegro/herbert-base-cased""": 5_14} _A = {} class lowerCamelCase ( __a ): '''simple docstring''' SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = HerbertTokenizer def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase="</s>" , **_lowerCamelCase , ): """simple docstring""" super().__init__( a__ , a__ , tokenizer_file=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , sep_token=a__ , **a__ , ) def _a (self , _lowerCamelCase , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : int = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ ) if token_ids_a is None: return [1] + ([0] * len(a__ )) + [1] return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1] def _a (self , _lowerCamelCase , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ : Any = [self.sep_token_id] UpperCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a (self , _lowerCamelCase , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ : int = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ )
360
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = dataset UpperCAmelCase__ : Union[str, Any] = process UpperCAmelCase__ : List[Any] = params def __len__(self ): """simple docstring""" return len(self.dataset ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : int = self.dataset[i] UpperCAmelCase__ : Any = self.process(_lowerCamelCase , **self.params ) return processed class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): """simple docstring""" UpperCAmelCase__ : Tuple = loader UpperCAmelCase__ : int = infer UpperCAmelCase__ : Optional[Any] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Any = loader_batch_size # Internal bookkeeping UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Union[str, Any] = None def __len__(self ): """simple docstring""" return len(self.loader ) def __iter__(self ): """simple docstring""" UpperCAmelCase__ : List[str] = iter(self.loader ) return self def _a (self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice UpperCAmelCase__ : Optional[int] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) UpperCAmelCase__ : List[str] = {} for k, element in self._loader_batch_data.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): # Convert ModelOutput to tuple first UpperCAmelCase__ : List[Any] = element.to_tuple() if isinstance(element[0] , torch.Tensor ): UpperCAmelCase__ : str = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase__ : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCamelCase , _lowerCamelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): UpperCAmelCase__ : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around UpperCAmelCase__ : str = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase__ : Tuple = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase__ : Dict = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
UpperCAmelCase__ : Optional[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 UpperCAmelCase__ : Union[str, Any] = self._loader_batch_data.__class__(_lowerCamelCase ) self._loader_batch_index += 1 return result def _a (self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch UpperCAmelCase__ : str = next(self.iterator ) UpperCAmelCase__ : Union[str, Any] = self.infer(_lowerCamelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_lowerCamelCase , torch.Tensor ): UpperCAmelCase__ : List[Any] = processed else: UpperCAmelCase__ : List[str] = list(processed.keys() )[0] UpperCAmelCase__ : List[str] = processed[key] if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : Any = len(_lowerCamelCase ) else: UpperCAmelCase__ : Union[str, Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase__ : Optional[int] = observed_batch_size # Setting internal index to unwrap the batch UpperCAmelCase__ : List[Any] = processed UpperCAmelCase__ : Optional[int] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): """simple docstring""" super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __iter__(self ): """simple docstring""" UpperCAmelCase__ : Tuple = iter(self.loader ) UpperCAmelCase__ : List[Any] = None return self def _a (self ): """simple docstring""" if self.subiterator is None: UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item UpperCAmelCase__ : List[str] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) UpperCAmelCase__ : List[str] = next(self.subiterator ) return processed class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __iter__(self ): """simple docstring""" UpperCAmelCase__ : str = iter(self.loader ) return self def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase__ : List[Any] = self.loader_batch_item() UpperCAmelCase__ : Dict = item.pop("""is_last""" ) accumulator.append(_lowerCamelCase ) if is_last: return accumulator while not is_last: UpperCAmelCase__ : List[str] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_lowerCamelCase , torch.Tensor ): UpperCAmelCase__ : Dict = processed else: UpperCAmelCase__ : List[Any] = list(processed.keys() )[0] UpperCAmelCase__ : List[Any] = processed[key] if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase__ : int = len(_lowerCamelCase ) else: UpperCAmelCase__ : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase__ : str = observed_batch_size UpperCAmelCase__ : Union[str, Any] = processed UpperCAmelCase__ : List[Any] = 0 while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase__ : Union[str, Any] = self.loader_batch_item() UpperCAmelCase__ : int = item.pop("""is_last""" ) accumulator.append(_lowerCamelCase ) if is_last: return accumulator else: UpperCAmelCase__ : Any = processed UpperCAmelCase__ : Any = item.pop("""is_last""" ) accumulator.append(_lowerCamelCase ) return accumulator class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = dataset UpperCAmelCase__ : Union[str, Any] = key def __len__(self ): """simple docstring""" return len(self.dataset ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" return self.dataset[i][self.key] class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : int = dataset UpperCAmelCase__ : Any = keya UpperCAmelCase__ : str = keya def __len__(self ): """simple docstring""" return len(self.dataset ) def __getitem__(self , _lowerCamelCase ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
166
0
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu lowerCAmelCase__ = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json""" with io.open(filename, """r""", encoding="""utf-8""") as f: lowerCAmelCase__ = json.load(f) @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return FSMTTokenizer.from_pretrained(lowercase ) def UpperCamelCase ( self , lowercase ) -> Optional[int]: '''simple docstring''' A__ = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ] ) @slow def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]: '''simple docstring''' A__ = F'facebook/wmt19-{pair}' A__ = self.get_tokenizer(lowercase ) A__ = self.get_model(lowercase ) A__ = bleu_data[pair]["src"] A__ = bleu_data[pair]["tgt"] A__ = tokenizer(lowercase , return_tensors="pt" , truncation=lowercase , padding="longest" ).to(lowercase ) A__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A__ = tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) A__ = calculate_bleu(lowercase , lowercase ) print(lowercase ) self.assertGreaterEqual(scores["bleu"] , lowercase )
68
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowerCamelCase = datasets.utils.logging.get_logger(__name__) lowerCamelCase = ['names', 'prefix'] lowerCamelCase = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowerCamelCase = ['encoding_errors', 'on_bad_lines'] lowerCamelCase = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): UpperCamelCase__ : str ="," UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[Union[int, List[int], str]] ="infer" UpperCamelCase__ : Optional[List[str]] =None UpperCamelCase__ : Optional[List[str]] =None UpperCamelCase__ : Optional[Union[int, str, List[int], List[str]]] =None UpperCamelCase__ : Optional[Union[List[int], List[str]]] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : bool =True UpperCamelCase__ : Optional[Literal["c", "python", "pyarrow"]] =None UpperCamelCase__ : Dict[Union[int, str], Callable[[Any], Any]] =None UpperCamelCase__ : Optional[list] =None UpperCamelCase__ : Optional[list] =None UpperCamelCase__ : bool =False UpperCamelCase__ : Optional[Union[int, List[int]]] =None UpperCamelCase__ : Optional[int] =None UpperCamelCase__ : Optional[Union[str, List[str]]] =None UpperCamelCase__ : bool =True UpperCamelCase__ : bool =True UpperCamelCase__ : bool =False UpperCamelCase__ : bool =True UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : str ="." UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : str ='"' UpperCamelCase__ : int =0 UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : bool =True UpperCamelCase__ : bool =True UpperCamelCase__ : int =0 UpperCamelCase__ : bool =True UpperCamelCase__ : bool =False UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : int =10000 UpperCamelCase__ : Optional[datasets.Features] =None UpperCamelCase__ : Optional[str] ="strict" UpperCamelCase__ : Literal["error", "warn", "skip"] ="error" UpperCamelCase__ : Optional[str] =None def lowerCamelCase ( self : List[str] ) -> str: """simple docstring""" if self.delimiter is not None: _lowerCamelCase : Union[str, Any] =self.delimiter if self.column_names is not None: _lowerCamelCase : Dict =self.column_names @property def lowerCamelCase ( self : int ) -> int: """simple docstring""" _lowerCamelCase : Any ={ 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 
'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowercase_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): UpperCamelCase__ : List[str] =CsvConfig def lowerCamelCase ( self : Any ) -> Tuple: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase ( self : Tuple , lowercase_ : Any ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) _lowerCamelCase : int =dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowercase_ , (str, list, tuple) ): _lowerCamelCase : List[Any] =data_files if isinstance(lowercase_ , lowercase_ ): _lowerCamelCase : Union[str, Any] =[files] _lowerCamelCase : Union[str, Any] =[dl_manager.iter_files(lowercase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] _lowerCamelCase : Optional[int] =[] for split_name, files in data_files.items(): if isinstance(lowercase_ , lowercase_ ): _lowerCamelCase : Dict =[files] _lowerCamelCase : Optional[int] =[dl_manager.iter_files(lowercase_ ) for file in files] splits.append(datasets.SplitGenerator(name=lowercase_ , gen_kwargs={'files': files} ) ) return splits def lowerCamelCase ( self : int , lowercase_ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: _lowerCamelCase : List[str] =self.config.features.arrow_schema if all(not require_storage_cast(lowercase_ ) for feature in self.config.features.values() ): # cheaper cast _lowerCamelCase : Optional[int] =pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowercase_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example _lowerCamelCase : List[Any] =table_cast(lowercase_ , lowercase_ ) return pa_table def lowerCamelCase ( self : Optional[Any] , lowercase_ : List[Any] ) -> Any: """simple docstring""" _lowerCamelCase : str =self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str _lowerCamelCase : Union[str, Any] =( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowercase_ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase_ ) ): _lowerCamelCase : Union[str, Any] 
=pd.read_csv(lowercase_ , iterator=lowercase_ , dtype=lowercase_ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowercase_ ): _lowerCamelCase : Tuple =pa.Table.from_pandas(lowercase_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowercase_ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowercase_ )}: {e}''' ) raise
199
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase : Dict = """▁""" _lowercase : List[Any] = {"""vocab_file""": """spiece.model"""} _lowercase : str = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""} } _lowercase : str = { """google/pegasus-xsum""": 5_1_2, } _lowercase : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( lowerCamelCase__ ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ['input_ids', 'attention_mask'] def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<mask_2>" , __SCREAMING_SNAKE_CASE="<mask_1>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1_03 , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" lowercase_ : Union[str, Any] = offset if additional_special_tokens is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError( F'''additional_special_tokens should be of type {type(__SCREAMING_SNAKE_CASE )}, but is''' F''' {type(__SCREAMING_SNAKE_CASE )}''' ) lowercase_ : int = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F'''<unk_{i}>''' for i in range(len(__SCREAMING_SNAKE_CASE ) , self.offset - 1 ) ] if len(set(__SCREAMING_SNAKE_CASE ) ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' F''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) lowercase_ : List[str] = additional_special_tokens_extended else: lowercase_ : int = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )] lowercase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token_sent=__SCREAMING_SNAKE_CASE , offset=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , ) lowercase_ : List[Any] = mask_token_sent lowercase_ : Tuple = vocab_file lowercase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__SCREAMING_SNAKE_CASE ) # add special tokens to encoder dict lowercase_ : Optional[Any] = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) lowercase_ : str = {v: k for k, v in self.encoder.items()} @property def _snake_case ( self ): """simple docstring""" return len(self.sp_model ) + self.offset def _snake_case ( self ): """simple docstring""" lowercase_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowercase_ : Optional[Any] = self.__dict__.copy() lowercase_ : str = None return state def __setstate__( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase_ : Tuple = {} lowercase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] lowercase_ : Dict = self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE ) return sp_id + self.offset def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: lowercase_ : Any = self.sp_model.IdToPiece(index - self.offset ) return token def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Optional[int] = [] lowercase_ : Union[str, Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token lowercase_ : str = [] else: current_sub_tokens.append(__SCREAMING_SNAKE_CASE ) out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) return out_string.strip() def _snake_case ( self , __SCREAMING_SNAKE_CASE=False ): """simple docstring""" return 1 
def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Optional[int] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(__SCREAMING_SNAKE_CASE ) elif token_ids_a is None: return self._special_token_mask(__SCREAMING_SNAKE_CASE ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ): """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : Tuple = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi: lowercase_ : int = self.sp_model.serialized_model_proto() fi.write(__SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
356
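The tokenizer above reserves the first offset ids for special tokens (pad, eos, the sentence mask, and the <unk_2>...<unk_102> pretraining placeholders) and shifts every SentencePiece id up by that offset in both directions of the id/token conversion. A minimal sketch of that arithmetic, with a toy dict standing in for the real SentencePiece model (the vocab contents below are invented for illustration):

# Sketch only: a plain dict stands in for the SentencePiece model used above.
OFFSET = 103                                  # ids 0..OFFSET-1 are reserved for special tokens
encoder = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}
decoder = {v: k for k, v in encoder.items()}
sp_vocab = {"▁hello": 0, "▁world": 1}         # pretend sentencepiece piece -> id

def token_to_id(token: str) -> int:
    if token in decoder:                      # special tokens keep their reserved ids
        return decoder[token]
    return sp_vocab[token] + OFFSET           # regular pieces are shifted by OFFSET

def id_to_token(index: int) -> str:
    if index in encoder:
        return encoder[index]
    pieces = {v: k for k, v in sp_vocab.items()}
    return pieces[index - OFFSET]             # undo the shift

assert id_to_token(token_to_id("▁hello")) == "▁hello"
assert token_to_id("<pad>") == 0

The vocab_size property above follows the same rule: len(sp_model) + offset.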
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase : List[Any] = logging.get_logger(__name__) _lowercase : Optional[Any] = { "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json", # See all ViT models at https://huggingface.co/models?filter=vit } class lowerCAmelCase__ ( lowerCamelCase_ ): lowerCAmelCase_ = '''vit''' def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-1_2 , __SCREAMING_SNAKE_CASE=2_24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=16 , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = hidden_size lowercase_ : Dict = num_hidden_layers lowercase_ : List[Any] = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : Union[str, Any] = hidden_act lowercase_ : Dict = hidden_dropout_prob lowercase_ : List[str] = attention_probs_dropout_prob lowercase_ : Any = initializer_range lowercase_ : Tuple = layer_norm_eps lowercase_ : Union[str, Any] = image_size lowercase_ : Tuple = patch_size lowercase_ : Tuple = num_channels lowercase_ : Union[str, Any] = qkv_bias lowercase_ : List[Any] = encoder_stride class lowerCAmelCase__ ( lowerCamelCase_ ): lowerCAmelCase_ = version.parse('''1.11''' ) @property def _snake_case ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case ( self ): """simple docstring""" return 1E-4
264
0
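The ViT config above fixes image_size=224 and patch_size=16, and the encoder's sequence length follows directly from those two numbers plus one [CLS] token. A quick check of that arithmetic:

image_size, patch_size, num_channels, hidden_size = 224, 16, 3, 768

num_patches = (image_size // patch_size) ** 2   # 14 * 14 = 196 patches
seq_len = num_patches + 1                       # +1 for the [CLS] token
patch_values = num_channels * patch_size ** 2   # 3 * 16 * 16 = 768 raw values per patch

print(num_patches, seq_len, patch_values)       # 196 197 768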
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : Any = logging.get_logger() @dataclass class __lowerCAmelCase : _lowercase : nn.Module _lowercase : List[nn.Module] = field(default_factory=UpperCamelCase__) _lowercase : list = field(default_factory=UpperCamelCase__) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' a__ : Tuple =len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCAmelCase__ ) def __call__( self , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCAmelCase__ ) [x.remove() for x in self.handles] return self @property def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __lowerCAmelCase : _lowercase : nn.Module _lowercase : nn.Module _lowercase : int = 0 _lowercase : List = field(default_factory=UpperCamelCase__) _lowercase : List = field(default_factory=UpperCamelCase__) def __call__( self , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' a__ : Any =Tracker(self.dest )(lowerCAmelCase__ ).parametrized a__ : List[str] =Tracker(self.src )(lowerCAmelCase__ ).parametrized a__ : Tuple =list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) ) a__ : Any =list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while''' F''' destination module has {len(lowerCAmelCase__ )}.''' ) for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : ResNetConfig , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" print(f'''Converting {name}...''' ) with torch.no_grad(): a__ : Tuple =timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE ).eval() a__ : str =ResNetForImageClassification(SCREAMING_SNAKE_CASE ).eval() a__ : List[str] =ModuleTransfer(src=SCREAMING_SNAKE_CASE , dest=SCREAMING_SNAKE_CASE ) a__ : Optional[Any] =torch.randn((1, 3, 224, 224) ) module_transfer(SCREAMING_SNAKE_CASE ) assert torch.allclose(from_model(SCREAMING_SNAKE_CASE ) , our_model(SCREAMING_SNAKE_CASE ).logits ), "The model logits don't match the original one." 
a__ : Union[str, Any] =f'''resnet{"-".join(name.split("resnet" ) )}''' print(SCREAMING_SNAKE_CASE ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE , ) # we can use the convnext one a__ : Optional[int] =AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE , ) print(f'''Pushed {checkpoint_name}''' ) def _A ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" a__ : int ="imagenet-1k-id2label.json" a__ : List[str] =1_000 a__ : List[Any] =(1, num_labels) a__ : str ="huggingface/label-files" a__ : Optional[int] =num_labels a__ : Union[str, Any] =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) a__ : str ={int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} a__ : int =idalabel a__ : Dict ={v: k for k, v in idalabel.items()} a__ : str =partial(SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE ) a__ : Optional[Any] ={ "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(SCREAMING_SNAKE_CASE , names_to_config[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, expected_shape if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) UpperCAmelCase : str = parser.parse_args() UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
95
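The Tracker/ModuleTransfer pair in the conversion script above works by recording which leaf modules fire during a forward pass, then copying state dicts positionally between the timm and Hugging Face models. A self-contained sketch of the hook-recording half (the tiny model below is illustrative, not the script's actual architecture):

import torch
import torch.nn as nn

def trace_leaves(model: nn.Module, x: torch.Tensor) -> list:
    """Run one forward pass and return the leaf modules in execution order."""
    traced, handles = [], []

    def hook(module, inputs, output):
        if len(list(module.children())) == 0:  # record leaf modules only
            traced.append(module)

    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)
    for h in handles:
        h.remove()  # always detach the hooks afterwards
    return traced

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
print([type(m).__name__ for m in trace_leaves(model, torch.randn(1, 3, 8, 8))])
# ['Conv2d', 'BatchNorm2d', 'ReLU']

Zipping two such traces and calling dest.load_state_dict(src.state_dict()) pairwise is the positional transfer the script performs; it only works because both networks execute equivalent operations in the same order, which the script checks by comparing trace lengths.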
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class __snake_case ( unittest.TestCase ): @require_torch def __a ( self ) -> Optional[Any]: '''simple docstring''' snake_case__ : Optional[Any] = pipeline( task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' ) snake_case__ : Union[str, Any] = load_dataset('ashraq/esc50' ) snake_case__ : List[Any] = dataset['train']['audio'][-1]['array'] snake_case__ : Tuple = audio_classifier(__UpperCamelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [{'score': 0.5_0_1, 'label': 'Sound of a dog'}, {'score': 0.4_9_9, 'label': 'Sound of vaccum cleaner'}] , ) @unittest.skip('No models are available in TF' ) def __a ( self ) -> List[str]: '''simple docstring''' pass @slow @require_torch def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Tuple = pipeline( task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , ) # This is an audio of a dog snake_case__ : Dict = load_dataset('ashraq/esc50' ) snake_case__ : Optional[int] = dataset['train']['audio'][-1]['array'] snake_case__ : Tuple = audio_classifier(__UpperCamelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ {'score': 0.9_9_9, 'label': 'Sound of a dog'}, {'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'}, ] , ) snake_case__ : str = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ [ {'score': 0.9_9_9, 'label': 'Sound of a dog'}, {'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'}, ], ] * 5 , ) snake_case__ : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 ) self.assertEqual( nested_simplify(__UpperCamelCase ) , [ [ {'score': 0.9_9_9, 'label': 'Sound of a dog'}, {'score': 0.0_0_1, 'label': 'Sound of vaccum cleaner'}, ], ] * 5 , ) @unittest.skip('No models are available in TF' ) def __a ( self ) -> Any: '''simple docstring''' pass
143
0
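The test above exercises the zero-shot-audio-classification pipeline with raw waveform arrays and free-form candidate labels. The same call pattern outside a test harness might look like the sketch below, with synthetic noise standing in for the esc50 sample (the tiny checkpoint name is the one the test itself uses):

import numpy as np
from transformers import pipeline

audio_classifier = pipeline(
    task="zero-shot-audio-classification",
    model="hf-internal-testing/tiny-clap-htsat-unfused",
)
audio = np.random.randn(48_000).astype(np.float32)  # ~1 s of fake audio in place of esc50
result = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
print(result)  # list of {"score": ..., "label": ...} pairs, scores summing to ~1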
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters UpperCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Any=None ) -> Optional[Any]: '''simple docstring''' if "." in tensor_name: _snake_case = tensor_name.split('.' ) for split in splits[:-1]: _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) _snake_case = new_module _snake_case = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) _snake_case = tensor_name in module._buffers _snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) _snake_case = False _snake_case = False if is_buffer or not is_bitsandbytes_available(): _snake_case = False _snake_case = False else: _snake_case = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) _snake_case = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: _snake_case = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _snake_case = old_value.to(UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , torch.Tensor ): _snake_case = value.to('cpu' ) if value.dtype == torch.inta: _snake_case = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: _snake_case = torch.tensor(UpperCamelCase__ , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , UpperCamelCase__ ) and fpaa_statistics is None: _snake_case = new_value.T _snake_case = old_value.__dict__ if is_abit: _snake_case = bnb.nn.IntaParams(UpperCamelCase__ , requires_grad=UpperCamelCase__ , **UpperCamelCase__ ).to(UpperCamelCase__ ) elif is_abit: _snake_case = bnb.nn.Paramsabit(UpperCamelCase__ , requires_grad=UpperCamelCase__ , **UpperCamelCase__ ).to(UpperCamelCase__ ) _snake_case = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(UpperCamelCase__ ) ) else: if value is None: _snake_case = old_value.to(UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , torch.Tensor ): _snake_case = value.to(UpperCamelCase__ ) else: _snake_case = torch.tensor(UpperCamelCase__ , device=UpperCamelCase__ ) if is_buffer: _snake_case = new_value else: _snake_case = nn.Parameter(UpperCamelCase__ , requires_grad=old_value.requires_grad ) _snake_case = new_value def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=False ) -> Optional[Any]: '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: _snake_case = [] current_key_name.append(UpperCamelCase__ ) if (isinstance(UpperCamelCase__ , nn.Linear ) or isinstance(UpperCamelCase__ , UpperCamelCase__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(UpperCamelCase__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case , _snake_case = module.weight.shape else: _snake_case = module.in_features _snake_case = module.out_features if quantization_config.quantization_method() == "llm_int8": _snake_case = bnb.nn.LinearabitLt( UpperCamelCase__ , UpperCamelCase__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) _snake_case = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _snake_case = bnb.nn.Linearabit( UpperCamelCase__ , UpperCamelCase__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) _snake_case = True # Store the module class in case we need to transpose the weight later _snake_case = type(UpperCamelCase__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(UpperCamelCase__ ) if len(list(module.children() ) ) > 0: _snake_case , _snake_case = _replace_with_bnb_linear( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , has_been_replaced=UpperCamelCase__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None ) -> List[Any]: '''simple docstring''' _snake_case = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert _snake_case , _snake_case = _replace_with_bnb_linear( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no 
linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def lowerCamelCase__ ( *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , UpperCamelCase__ , ) return replace_with_bnb_linear(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase__ ( *UpperCamelCase__ : str , **UpperCamelCase__ : Any ) -> Optional[int]: '''simple docstring''' warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , UpperCamelCase__ , ) return set_module_quantized_tensor_to_device(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Tuple: '''simple docstring''' _snake_case = deepcopy(UpperCamelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() _snake_case = find_tied_parameters(UpperCamelCase__ ) # For compatibility with Accelerate < 0.18 if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _snake_case = sum(UpperCamelCase__ , [] ) _snake_case = len(UpperCamelCase__ ) > 0 # Check if it is a base model _snake_case = not hasattr(UpperCamelCase__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _snake_case = list(model.named_children() ) _snake_case = [list_modules[-1][0]] # add last module together with tied weights _snake_case = set(UpperCamelCase__ ) - set(UpperCamelCase__ ) _snake_case = list(set(UpperCamelCase__ ) ) + list(UpperCamelCase__ ) # remove ".weight" from the keys _snake_case = ['.weight', '.bias'] _snake_case = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _snake_case = name.replace(UpperCamelCase__ , '' ) filtered_module_names.append(UpperCamelCase__ ) return filtered_module_names
295
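Before it can quantize or move anything, the first function above has to walk a dotted tensor name such as "transformer.h.0.attn.weight" down to the submodule that owns it. That traversal is independent of bitsandbytes and easy to isolate; a minimal sketch:

import torch.nn as nn

def resolve_module(model: nn.Module, tensor_name: str):
    """Walk a dotted name like '0.weight' down to (owning_module, leaf_name)."""
    *path, leaf = tensor_name.split(".")
    module = model
    for part in path:
        module = getattr(module, part)  # raises AttributeError on a bad path
    if leaf not in module._parameters and leaf not in module._buffers:
        raise ValueError(f"{module} has no parameter or buffer named {leaf}.")
    return module, leaf

model = nn.Sequential(nn.Linear(4, 4))
owner, leaf = resolve_module(model, "0.weight")
assert owner is model[0] and leaf == "weight"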
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating vertices not yet visited
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate: if we find an answer return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
295
1
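A quick check of the backtracking search above, on a five-vertex graph that is known to contain a Hamiltonian cycle (this adjacency matrix is a standard textbook example, not taken from the file):

graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]: a closed tour visiting every vertex once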
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : int ="dpr" def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__="absolute" , snake_case__ = 0 , **snake_case__ , ): """simple docstring""" super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase : Union[str, Any] = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : Optional[int] = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : Dict = intermediate_size lowerCAmelCase : Union[str, Any] = hidden_dropout_prob lowerCAmelCase : Dict = attention_probs_dropout_prob lowerCAmelCase : Dict = max_position_embeddings lowerCAmelCase : Tuple = type_vocab_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Any = layer_norm_eps lowerCAmelCase : Dict = projection_dim lowerCAmelCase : Dict = position_embedding_type
108
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : int ="dpr" def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__="absolute" , snake_case__ = 0 , **snake_case__ , ): """simple docstring""" super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase : Union[str, Any] = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : Optional[int] = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : Dict = intermediate_size lowerCAmelCase : Union[str, Any] = hidden_dropout_prob lowerCAmelCase : Dict = attention_probs_dropout_prob lowerCAmelCase : Dict = max_position_embeddings lowerCAmelCase : Tuple = type_vocab_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Any = layer_norm_eps lowerCAmelCase : Dict = projection_dim lowerCAmelCase : Dict = position_embedding_type
108
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[torch.FloatTensor] = None snake_case__ : torch.FloatTensor = None snake_case__ : Optional[Tuple[torch.FloatTensor]] = None snake_case__ : Optional[Tuple[torch.FloatTensor]] = None class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Union[str, Any]="cls" , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> Union[str, Any]: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = project_dim __SCREAMING_SNAKE_CASE = pooler_fn __SCREAMING_SNAKE_CASE = learn_encoder __SCREAMING_SNAKE_CASE = use_attention_mask class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = [R"pooler", R"logit_scale"] snake_case__ : List[Any] = [R"position_ids", R"predictions.decoder.bias"] snake_case__ : str = "roberta" snake_case__ : Optional[int] = RobertaSeriesConfig def __init__( self : Any , UpperCAmelCase__ : str ) -> Tuple: super().__init__(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = XLMRobertaModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim ) __SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , "has_pre_transformation" , UpperCAmelCase__ ) if self.has_pre_transformation: __SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim ) __SCREAMING_SNAKE_CASE = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.base_model( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_attentions=UpperCAmelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCAmelCase__ , ) if self.has_pre_transformation: __SCREAMING_SNAKE_CASE = outputs["hidden_states"][-2] __SCREAMING_SNAKE_CASE = self.pre_LN(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.transformation_pre(UpperCAmelCase__ ) return TransformationModelOutput( projection_state=UpperCAmelCase__ , last_hidden_state=outputs.last_hidden_state , 
hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __SCREAMING_SNAKE_CASE = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=UpperCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
195
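When has_pre_transformation is set, the model above builds its projection from the second-to-last hidden state, normalized and then linearly projected, rather than from the final layer output. The core of that branch as a standalone sketch (the dimensions are illustrative):

import torch
import torch.nn as nn

hidden_size, project_dim, seq_len = 768, 512, 16
pre_ln = nn.LayerNorm(hidden_size, eps=1e-5)
transformation_pre = nn.Linear(hidden_size, project_dim)

# stand-in for outputs["hidden_states"][-2], the penultimate encoder layer
penultimate = torch.randn(1, seq_len, hidden_size)
projection_state = transformation_pre(pre_ln(penultimate))
print(projection_state.shape)  # torch.Size([1, 16, 512])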
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch a__ : Optional[int] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[Any] = ["pixel_values"] def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Dict , ) -> None: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_2_4} __SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6} __SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" ) __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = resample __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_flip_channel_order def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PIL.Image.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) -> np.ndarray: __SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" ) __SCREAMING_SNAKE_CASE = get_resize_output_image_size(UpperCAmelCase__ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase__ ) return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ) -> np.ndarray: __SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Dict , ) -> Dict: return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: return flip_channel_order(UpperCAmelCase__ , data_format=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ) -> PIL.Image.Image: __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop __SCREAMING_SNAKE_CASE = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) __SCREAMING_SNAKE_CASE = size if size is not None else self.size __SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size __SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ , param_name="crop_size" ) __SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase__ ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images] if do_center_crop: __SCREAMING_SNAKE_CASE = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: __SCREAMING_SNAKE_CASE = [self.flip_channel_order(image=UpperCAmelCase__ ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] __SCREAMING_SNAKE_CASE = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) -> Optional[int]: __SCREAMING_SNAKE_CASE = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = target_sizes.numpy() __SCREAMING_SNAKE_CASE = [] for idx in range(len(UpperCAmelCase__ ) ): __SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = logits.argmax(dim=1 ) __SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
195
1
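The flip_channel_order step in the image processor above exists because the pretrained checkpoints expect BGR input while typical loaders produce RGB; the operation is nothing more than a reversal along the channel axis. A numpy sketch for a channels-first array:

import numpy as np

rgb = np.random.rand(3, 224, 224)  # channels-first RGB image
bgr = rgb[::-1, :, :]              # reverse the channel axis: R, G, B -> B, G, R
assert np.array_equal(bgr[0], rgb[2]) and np.array_equal(bgr[2], rgb[0])

For a channels-last array the same slice moves to the final axis, image[..., ::-1].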
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case : Optional[Any] = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : str = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : List[str] = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys snake_case : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
94
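The _LazyModule indirection above defers the torch and vision imports until an attribute is actually requested. The same effect can be sketched with a module-level __getattr__ (PEP 562); the package layout below is invented for illustration and is not BridgeTower's real structure:

# mypkg/__init__.py: minimal lazy-import sketch in the spirit of _LazyModule
import importlib

_import_structure = {"heavy_module": ["BigClass", "big_function"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)  # the heavy import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")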
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self : Any): '''simple docstring''' __lowercase =0 @slow def __lowerCamelCase ( self : Dict): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsNotNone(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast)) self.assertGreater(len(_lowerCAmelCase) , 0) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsNotNone(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast)) self.assertGreater(len(_lowerCAmelCase) , 0) def __lowerCamelCase ( self : Dict): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast)) self.assertEqual(tokenizer.vocab_size , 1_2) def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast)) self.assertEqual(tokenizer.vocab_size , 2_0) def __lowerCamelCase ( self : Dict): '''simple docstring''' __lowercase =AutoConfig.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) # Check that tokenizer_type ≠ model_type __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast)) self.assertEqual(tokenizer.vocab_size , 1_2) def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt')) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert' , use_fast=_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json')) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt')) 
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2' , use_fast=_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) @require_tokenizers def __lowerCamelCase ( self : int): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt')) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert') self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json')) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt')) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2') self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) def __lowerCamelCase ( self : Any): '''simple docstring''' with pytest.raises(_lowerCAmelCase): AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx') @require_tokenizers def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: __lowercase =tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased') self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast)) if isinstance(_lowerCAmelCase , _lowerCAmelCase): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCAmelCase) else: self.assertEqual(tokenizer.do_lower_case , _lowerCAmelCase) self.assertEqual(tokenizer.model_max_length , 5_1_2) @require_tokenizers def __lowerCamelCase ( self : List[Any]): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ): __lowercase =tokenizer_class.from_pretrained('julien-c/herlolip-not-exists') def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =TOKENIZER_MAPPING.values() __lowercase =[] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_lowerCAmelCase) @require_tokenizers def __lowerCamelCase ( self : int): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_lowerCAmelCase) , _lowerCAmelCase) self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _lowerCAmelCase) @require_tokenizers def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_lowerCAmelCase) __lowercase ='Hello, world. How are you?' 
__lowercase =tokenizer.tokenize(_lowerCAmelCase) self.assertEqual('[UNK]' , tokens[0]) __lowercase =AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_lowerCAmelCase) __lowercase =tokenizer.tokenize(_lowerCAmelCase) self.assertEqual('[UNK]' , tokens[0]) @require_tokenizers def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config') self.assertEqual(type(_lowerCAmelCase) , _lowerCAmelCase) self.assertEqual(tokenizer.model_max_length , 5_1_2) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0) self.assertEqual(tokenizer.unk_token , '[UNK]') self.assertEqual(tokenizer.padding_side , 'right') self.assertEqual(tokenizer.truncation_side , 'right') def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast)) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , tokenizer.__class__) self.assertEqual(tokenizera.vocab_size , 1_2) def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained('ctrl') # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase =get_tokenizer_config('bert-base-cased') __lowercase =config.pop('_commit_hash' , _lowerCAmelCase) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_lowerCAmelCase , {'do_lower_case': False}) # This model does not have a tokenizer_config so we get back an empty dict. __lowercase =get_tokenizer_config(_lowerCAmelCase) self.assertDictEqual(_lowerCAmelCase , {}) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =get_tokenizer_config(_lowerCAmelCase) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer') def __lowerCamelCase ( self : List[Any]): '''simple docstring''' try: AutoConfig.register('custom' , _lowerCAmelCase) AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCAmelCase): AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase) __lowercase =CustomTokenizer.from_pretrained(_lowerCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def __lowerCamelCase ( self : int): '''simple docstring''' try: AutoConfig.register('custom' , _lowerCAmelCase) # Can register in two steps AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None)) AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast)) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast)) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCAmelCase): AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: __lowercase =BertTokenizerFast.from_pretrained(_lowerCAmelCase) bert_tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =CustomTokenizerFast.from_pretrained(_lowerCAmelCase) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , use_fast=_lowerCAmelCase) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' with self.assertRaises(_lowerCAmelCase): __lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer') # If remote code is disabled, we can't load this config. with self.assertRaises(_lowerCAmelCase): __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase) self.assertTrue(tokenizer.special_attribute_present) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase) self.assertTrue(reloaded_tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast') self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast') # Test we can also load the slow version __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase) self.assertTrue(tokenizer.special_attribute_present) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCAmelCase) __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer') self.assertTrue(reloaded_tokenizer.special_attribute_present) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer') @require_tokenizers def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' class _UpperCamelCase ( A ): '''simple docstring''' lowerCAmelCase__ = False class _UpperCamelCase ( A ): '''simple docstring''' lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('custom' , _lowerCAmelCase) AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase) AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase) # If remote code is not set, the default is to use local __lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer') self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast') self.assertFalse(tokenizer.special_attribute_present) __lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_lowerCAmelCase) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') self.assertFalse(tokenizer.special_attribute_present) # If remote code is disabled, we load the local one. 
__lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast') self.assertFalse(tokenizer.special_attribute_present) __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') self.assertFalse(tokenizer.special_attribute_present) # If remote is enabled, we load from the Hub __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast') self.assertTrue(tokenizer.special_attribute_present) __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') self.assertTrue(tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase) self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast') # Test we can also load the slow version __lowercase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase) self.assertTrue(tokenizer.special_attribute_present) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer') def __lowerCamelCase ( self : List[str]): '''simple docstring''' with self.assertRaisesRegex( _lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'): __lowercase =AutoTokenizer.from_pretrained('bert-base') def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' with self.assertRaisesRegex( _lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'): __lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , revision='aaaaaa') def __lowerCamelCase ( self : Dict): '''simple docstring''' __lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert') with RequestCounter() as counter: __lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert') self.assertEqual(counter.get_request_count , 0) self.assertEqual(counter.head_request_count , 1) self.assertEqual(counter.other_request_count , 0)
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th convergent of e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the k-th
    term is 2k/3 when k is divisible by 3 and 1 otherwise.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
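# Quick self-contained sanity check, using values from the Project Euler 65 statement:
# the 10th convergent of e is 1457/536, and 1 + 4 + 5 + 7 = 17.
assert sum_digits(1457) == 17
assert solution(10) == 17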
import copy import re class __UpperCAmelCase : __snake_case : Any = "hp" __snake_case : str = {} __snake_case : List[Any] = None @classmethod def UpperCamelCase ( cls: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = prefix _SCREAMING_SNAKE_CASE = defaults cls.build_naming_info() @staticmethod def UpperCamelCase ( UpperCAmelCase_: Any , UpperCAmelCase_: str ): '''simple docstring''' if len(UpperCAmelCase_ ) == 0: return "" _SCREAMING_SNAKE_CASE = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(UpperCAmelCase_ ) + 1 ): _SCREAMING_SNAKE_CASE = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _SCREAMING_SNAKE_CASE = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(UpperCAmelCase_: List[Any] ): _SCREAMING_SNAKE_CASE = """""" while integer != 0: _SCREAMING_SNAKE_CASE = chr(ord("""A""" ) + integer % 10 ) + s integer //= 10 return s _SCREAMING_SNAKE_CASE = 0 while True: _SCREAMING_SNAKE_CASE = word + """#""" + int_to_alphabetic(UpperCAmelCase_ ) if sword in info["reverse_short_word"]: continue else: _SCREAMING_SNAKE_CASE = sword break _SCREAMING_SNAKE_CASE = short_word _SCREAMING_SNAKE_CASE = word return short_word @staticmethod def UpperCamelCase ( UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = param_name.split("""_""" ) _SCREAMING_SNAKE_CASE = [TrialShortNamer.shortname_for_word(UpperCAmelCase_ , UpperCAmelCase_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _SCREAMING_SNAKE_CASE = ["""""", """_"""] for separator in separators: _SCREAMING_SNAKE_CASE = separator.join(UpperCAmelCase_ ) if shortname not in info["reverse_short_param"]: _SCREAMING_SNAKE_CASE = shortname _SCREAMING_SNAKE_CASE = param_name return shortname return param_name @staticmethod def UpperCamelCase ( UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = TrialShortNamer.shortname_for_key(UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = short_name _SCREAMING_SNAKE_CASE = param_name @classmethod def UpperCamelCase ( cls: str ): '''simple docstring''' if cls.NAMING_INFO is not None: return _SCREAMING_SNAKE_CASE = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } _SCREAMING_SNAKE_CASE = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = info @classmethod def UpperCamelCase ( cls: Any , UpperCAmelCase_: Optional[Any] ): '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None _SCREAMING_SNAKE_CASE = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _SCREAMING_SNAKE_CASE = cls.NAMING_INFO["""short_param"""][k] if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): _SCREAMING_SNAKE_CASE = 1 if v else 0 _SCREAMING_SNAKE_CASE = """""" if isinstance(UpperCAmelCase_ , (int, float) ) else """-""" _SCREAMING_SNAKE_CASE = F'{key}{sep}{v}' name.append(UpperCAmelCase_ 
) return "_".join(UpperCAmelCase_ ) @classmethod def UpperCamelCase ( cls: int , UpperCAmelCase_: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE = repr[len(cls.PREFIX ) + 1 :] if repr == "": _SCREAMING_SNAKE_CASE = [] else: _SCREAMING_SNAKE_CASE = repr.split("""_""" ) _SCREAMING_SNAKE_CASE = {} for value in values: if "-" in value: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = value.split("""-""" ) else: _SCREAMING_SNAKE_CASE = re.sub("""[0-9.]""" , """""" , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = float(re.sub("""[^0-9.]""" , """""" , UpperCAmelCase_ ) ) _SCREAMING_SNAKE_CASE = cls.NAMING_INFO["""reverse_short_param"""][p_k] _SCREAMING_SNAKE_CASE = p_v for k in cls.DEFAULTS: if k not in parameters: _SCREAMING_SNAKE_CASE = cls.DEFAULTS[k] return parameters
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase__ : Optional[Any] = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowercase__ : Optional[int] = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def __lowercase ( _a , _a=False ): snake_case_, snake_case_ : int = create_model( '''HTSAT-tiny''' , '''roberta''' , _a , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=_a , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def __lowercase ( _a ): snake_case_ : List[str] = {} snake_case_ : List[Any] = r'''.*sequential.(\d+).*''' snake_case_ : str = r'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: snake_case_ : List[str] = key.replace(_a , _a ) if re.match(_a , _a ): # replace sequential layers with list snake_case_ : Tuple = re.match(_a , _a ).group(1 ) snake_case_ : List[Any] = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(_a )//3}.linear." ) elif re.match(_a , _a ): snake_case_ : Optional[int] = int(re.match(_a , _a ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... snake_case_ : Tuple = 1 if projecton_layer == 0 else 2 snake_case_ : Optional[Any] = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." 
) if "audio" and "qkv" in key: # split qkv into query key and value snake_case_ : Dict = value snake_case_ : Tuple = mixed_qkv.size(0 ) // 3 snake_case_ : List[Any] = mixed_qkv[:qkv_dim] snake_case_ : int = mixed_qkv[qkv_dim : qkv_dim * 2] snake_case_ : Union[str, Any] = mixed_qkv[qkv_dim * 2 :] snake_case_ : Optional[int] = query_layer snake_case_ : int = key_layer snake_case_ : Tuple = value_layer else: snake_case_ : str = value return model_state_dict def __lowercase ( _a , _a , _a , _a=False ): snake_case_, snake_case_ : List[Any] = init_clap(_a , enable_fusion=_a ) clap_model.eval() snake_case_ : int = clap_model.state_dict() snake_case_ : Dict = rename_state_dict(_a ) snake_case_ : str = ClapConfig() snake_case_ : List[str] = enable_fusion snake_case_ : List[Any] = ClapModel(_a ) # ignore the spectrogram embedding layer model.load_state_dict(_a , strict=_a ) model.save_pretrained(_a ) transformers_config.save_pretrained(_a ) if __name__ == "__main__": lowercase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowercase__ : List[Any] = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[Any] = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _UpperCAmelCase ( lowerCAmelCase__): _lowerCAmelCase : List[Any] = """gpt_neox""" def __init__( self : List[str] , lowercase_ : str=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[Any]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : List[str]=24576 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.25 , lowercase_ : Optional[int]=10000 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=0.1 , lowercase_ : Tuple=2048 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-5 , lowercase_ : str=True , lowercase_ : str=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=False , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , **lowercase_ : Optional[int] , ): super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) snake_case_ : List[str] = vocab_size snake_case_ : Optional[Any] = max_position_embeddings snake_case_ : str = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : List[Any] = intermediate_size snake_case_ : List[Any] = hidden_act snake_case_ : str = rotary_pct snake_case_ : Dict = rotary_emb_base snake_case_ : Optional[int] = attention_dropout snake_case_ : Tuple = hidden_dropout snake_case_ : Tuple = classifier_dropout snake_case_ : List[str] = initializer_range snake_case_ : Union[str, Any] = layer_norm_eps snake_case_ : Any = use_cache snake_case_ : Optional[int] = tie_word_embeddings snake_case_ : Any = use_parallel_residual snake_case_ : Union[str, Any] = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def _snake_case ( self : Optional[int] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f"got {self.rope_scaling}" ) snake_case_ : Any = self.rope_scaling.get('''type''' , lowercase_ ) snake_case_ : Union[str, Any] = self.rope_scaling.get('''factor''' , lowercase_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fixed: the package is bs4 (beautifulsoup4), not "bsa"
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # fixed: the correct header name is "User-Agent"; "UserAgent" is ignored by servers
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def A_ ( A__ ) -> Dict: if isinstance(A__ , collections.abc.Iterable ): return x return (x, x) @require_tf class A__ : """simple docstring""" def __lowercase ( self , lowercase , lowercase) -> Union[str, Any]: '''simple docstring''' pass def __lowercase ( self) -> Dict: '''simple docstring''' pass def __lowercase ( self) -> Dict: '''simple docstring''' pass def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> List[Any]: '''simple docstring''' a__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase) a__ : Any = TFVisionTextDualEncoderModel(lowercase) a__ : Dict = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> List[Any]: '''simple docstring''' a__ , a__ : List[Any] = self.get_vision_text_model(lowercase , lowercase) a__ : List[str] = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase) a__ : Dict = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Tuple: '''simple docstring''' a__ , a__ : Any = self.get_vision_text_model(lowercase , lowercase) a__ : Tuple = {'vision_model': vision_model, 'text_model': text_model} a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase) a__ : Any = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]: '''simple docstring''' a__ , a__ : int = self.get_vision_text_model(lowercase , lowercase) a__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase) a__ : Optional[Any] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase) a__ : int = output[0].numpy() with 
tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase) a__ : str = TFVisionTextDualEncoderModel.from_pretrained(lowercase) a__ : List[str] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase) a__ : str = after_output[0].numpy() a__ : str = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(lowercase , 1e-5) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[int]: '''simple docstring''' a__ , a__ : Optional[Any] = self.get_vision_text_model(lowercase , lowercase) a__ : Dict = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase) a__ : Optional[int] = model( input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase) a__ : List[str] = output.vision_model_output.attentions self.assertEqual(len(lowercase) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[Any] = to_atuple(vision_model.config.image_size) a__ : Dict = to_atuple(vision_model.config.patch_size) a__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) a__ : List[str] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) a__ : Union[str, Any] = output.text_model_output.attentions self.assertEqual(len(lowercase) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __lowercase ( self , lowercase , lowercase , lowercase) -> Any: '''simple docstring''' a__ : str = np.abs((a - b)).max() self.assertLessEqual(lowercase , lowercase , F'Difference between torch and flax is {diff} (>= {tol}).') def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowercase) def __lowercase ( self) -> Dict: '''simple docstring''' a__ : List[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase) def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : int = self.prepare_config_and_inputs() self.check_save_load(**lowercase) def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : List[str] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase) @slow def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ , a__ : Union[str, Any] = self.get_pretrained_model_and_inputs() a__ : Optional[int] = model_a(**lowercase) a__ : int = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase) a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase) a__ : int = model_a(**lowercase) a__ : str = after_outputs[0].numpy() a__ : List[Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(lowercase , 1e-5) @require_tf class A__ ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert') a__ : str = 13 a__ : Optional[int] = floats_tensor( [ batch_size, 
model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) a__ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) a__ : Optional[int] = random_attention_mask([batch_size, 4]) a__ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __lowercase ( self , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__ : Optional[Any] = TFViTModel(lowercase , name='vision_model') a__ : Tuple = TFBertModel(lowercase , name='text_model') return vision_model, text_model def __lowercase ( self) -> Any: '''simple docstring''' a__ : Tuple = TFViTModelTester(self) a__ : int = TFBertModelTester(self) a__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs() a__ : Any = bert_model_tester.prepare_config_and_inputs() a__ , a__ , a__ : Optional[int] = vision_config_and_inputs ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta') a__ : Union[str, Any] = 13 a__ : Dict = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) a__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) a__ : Any = random_attention_mask([batch_size, 4]) a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]: '''simple docstring''' a__ , a__ : List[Any] = self.get_vision_text_model(lowercase , lowercase) a__ : Any = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase) a__ : int = model( input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase) a__ : Optional[Any] = output.vision_model_output.attentions self.assertEqual(len(lowercase) , vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : Optional[int] = to_atuple(vision_model.config.image_size) a__ : str = to_atuple(vision_model.config.patch_size) a__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) a__ : List[str] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) a__ : List[Any] = output.text_model_output.attentions self.assertEqual(len(lowercase) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __lowercase ( self , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__ : List[str] = TFDeiTModel(lowercase , name='vision_model') a__ : Optional[int] = TFRobertaModel(lowercase , 
name='text_model') return vision_model, text_model def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Optional[int] = TFDeiTModelTester(self) a__ : str = TFRobertaModelTester(self) a__ : str = vit_model_tester.prepare_config_and_inputs() a__ : Dict = bert_model_tester.prepare_config_and_inputs() a__ , a__ , a__ : Any = vision_config_and_inputs ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" def __lowercase ( self) -> Any: '''simple docstring''' a__ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert') a__ : Optional[int] = 13 a__ : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) a__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) a__ : str = random_attention_mask([batch_size, 4]) a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def __lowercase ( self , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__ : str = TFCLIPVisionModel(lowercase , name='vision_model') a__ : str = TFBertModel(lowercase , name='text_model') return vision_model, text_model def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : List[str] = TFCLIPVisionModelTester(self) a__ : Dict = TFBertModelTester(self) a__ : Optional[Any] = clip_model_tester.prepare_config_and_inputs() a__ : List[Any] = bert_model_tester.prepare_config_and_inputs() a__ , a__ : Union[str, Any] = vision_config_and_inputs ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Optional[int] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class A__ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self) -> Any: '''simple docstring''' a__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained( 'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=lowercase) a__ : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian') a__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') a__ : Optional[Any] = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=lowercase , padding=lowercase , return_tensors='np') a__ : int = model(**lowercase) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) a__ : List[str] = np.array([[1.2_28_47_27, 0.3_10_41_22]]) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowercase , atol=1e-3))
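# Hedged sketch of the API under test; the tiny Hub checkpoint ids come from the tests
# above. The dummy input ids are arbitrary small values assumed to fit the tiny vocab.
import numpy as np
from transformers import TFVisionTextDualEncoderModel

model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
)
size = model.vision_model.config.image_size
channels = model.vision_model.config.num_channels
pixel_values = np.random.rand(1, channels, size, size).astype("float32")  # channels-first
input_ids = np.array([[0, 1, 2, 3]])
outputs = model(input_ids=input_ids, pixel_values=pixel_values)
print(outputs.logits_per_image.shape)  # (1, 1): one image scored against one text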
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort ``collection[:n]`` in place using recursive insertion sort."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble the element at ``index - 1`` rightward until it is in order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
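# Quick non-interactive sanity check of the recursive sort:
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 4, 5]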
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
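# Hedged sketch of the reader `__call__` defined above, mirroring the official DPR
# example. The keyword names (questions/titles/texts) follow the upstream signature,
# which the anonymized parameter names in the listing obscure.
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)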
'''simple docstring''' import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = TextToVideoSDPipeline __lowerCAmelCase = TEXT_TO_IMAGE_PARAMS __lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowerCAmelCase = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def A (self : Optional[int] ): torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) A = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) A = CLIPTextModel(_SCREAMING_SNAKE_CASE ) A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def A (self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ): A = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: A = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) A = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def A (self : Tuple ): A = "cpu" # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = TextToVideoSDPipeline(**_SCREAMING_SNAKE_CASE ) A = sd_pipe.to(_SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) A = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) A = "np" A = sd_pipe(**_SCREAMING_SNAKE_CASE ).frames A = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) A = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A (self : int ): 
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def A (self : Dict ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def A (self : Optional[Any] ): pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def A (self : int ): pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def A (self : Tuple ): pass def A (self : Any ): return super().test_progress_bar() @slow @skip_mps class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def A (self : List[str] ): A = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) A = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) A = pipe.to("""cuda""" ) A = "Spiderman is surfing" A = torch.Generator(device="""cpu""" ).manual_seed(0 ) A = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="""pt""" ).frames A = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def A (self : List[Any] ): A = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) A = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) A = pipe.to("""cuda""" ) A = "Spiderman is surfing" A = torch.Generator(device="""cpu""" ).manual_seed(0 ) A = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""pt""" ).frames A = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
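# Usage sketch of the pipeline exercised above; the checkpoint id, scheduler swap and
# arguments are taken directly from the slow integration test (requires a CUDA GPU).
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames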
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
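# Worked Luhn arithmetic for "4111111111111111" (16 digits):
#   doubled digits (every 2nd from the right): 4 -> 8 and seven 1 -> 2, contributing 8 + 7*2 = 22
#   remaining digits: eight 1s, contributing 8
#   total = 22 + 8 = 30, and 30 % 10 == 0, so the number passes the check
assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")  # changing the last digit gives total 31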
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
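# The parser/command pair above backs the `accelerate env` CLI subcommand:
#   $ accelerate env
# It can also be driven programmatically, e.g. for a smoke test:
info = env_command(env_command_parser().parse_args([]))
print(sorted(info))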
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : str = ["""pixel_values"""] def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ): super().__init__(**snake_case ) lowercase = size if size is not None else {'shortest_edge': 224} lowercase = get_size_dict(snake_case , default_to_square=snake_case ) lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224} lowercase = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' ) lowercase = do_resize lowercase = size lowercase = resample lowercase = do_center_crop lowercase = crop_size lowercase = do_rescale lowercase = rescale_factor lowercase = do_normalize lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase = image_std if image_std is not None else OPENAI_CLIP_STD lowercase = do_convert_rgb def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ): lowercase = get_size_dict(snake_case , default_to_square=snake_case ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case ) return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ): lowercase = get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ): return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ): return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ): lowercase = do_resize if do_resize is not None else self.do_resize lowercase = size if size is not None else self.size lowercase = get_size_dict(snake_case , param_name='size' , default_to_square=snake_case ) lowercase = resample if resample is not None else self.resample lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase = crop_size if crop_size is not None else self.crop_size lowercase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case ) lowercase = do_rescale if do_rescale is not None else self.do_rescale lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase = do_normalize if do_normalize is not None else self.do_normalize lowercase = image_mean if image_mean is not None else self.image_mean lowercase = image_std if image_std is not None else self.image_std lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase = [convert_to_rgb(snake_case ) for image in images] # All transformations expect numpy arrays. lowercase = [to_numpy_array(snake_case ) for image in images] if do_resize: lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images] if do_center_crop: lowercase = [self.center_crop(image=snake_case , size=snake_case ) for image in images] if do_rescale: lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_normalize: lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images] lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images] lowercase = {'pixel_values': images} return BatchFeature(data=snake_case , tensor_type=snake_case )
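# Hedged usage sketch: the anonymized class above is the standard CLIP image processor
# (note the OPENAI_CLIP_MEAN/STD defaults); "openai/clip-vit-base-patch32" is a real
# checkpoint. Any input size works: the processor resizes and center-crops to 224x224.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype("uint8"))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)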
195
1
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __SCREAMING_SNAKE_CASE : int = '__DUMMY_TRANSFORMERS_USER__' __SCREAMING_SNAKE_CASE : List[Any] = 'Dummy User' __SCREAMING_SNAKE_CASE : List[str] = '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' __SCREAMING_SNAKE_CASE : Dict = 'https://hub-ci.huggingface.co' __SCREAMING_SNAKE_CASE : Union[str, Any] = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' __SCREAMING_SNAKE_CASE : List[str] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' __SCREAMING_SNAKE_CASE : Optional[Any] = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , UpperCamelCase__ ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> int: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , UpperCamelCase__ ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , UpperCamelCase__ ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , UpperCamelCase__ ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: HfFolder.save_token(UpperCamelCase__ ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def _a ( ) -> Optional[int]: return HfApi(endpoint=UpperCamelCase__ ) @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = HfFolder.get_token() HfFolder.save_token(UpperCamelCase__ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase__ ) @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: def _cleanup_repo(_SCREAMING_SNAKE_CASE ): hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple: @contextmanager def _temporary_repo(_SCREAMING_SNAKE_CASE ): try: yield repo_id finally: cleanup_repo(UpperCamelCase__ ) return _temporary_repo @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = f"""repo_txt_data-{int(time.time() * 1_0E3 )}""" snake_case_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" , private=UpperCamelCase__ ) hf_api.upload_file( token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo="""data/text_data.txt""" , repo_id=UpperCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: snake_case_ = f"""repo_zipped_txt_data-{int(time.time() * 1_0E3 )}""" snake_case_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" , private=UpperCamelCase__ ) hf_api.upload_file( token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , 
path_in_repo="""data.zip""" , repo_id=UpperCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: snake_case_ = f"""repo_zipped_img_data-{int(time.time() * 1_0E3 )}""" snake_case_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" , private=UpperCamelCase__ ) hf_api.upload_file( token=UpperCamelCase__ , path_or_fileobj=str(UpperCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=UpperCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(UpperCamelCase__ , token=UpperCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: return hf_private_dataset_repo_zipped_img_data_
352
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __A (snake_case__): '''simple docstring''' __lowercase: Any = ["""image_processor""", """tokenizer"""] __lowercase: Dict = """AutoImageProcessor""" __lowercase: List[Any] = """AutoTokenizer""" def __init__( self : Tuple , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" snake_case_ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCAmelCase_ , ) snake_case_ = kwargs.pop("""feature_extractor""" ) snake_case_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) snake_case_ = self.image_processor snake_case_ = False def __call__( self : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str ) ->List[str]: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = kwargs.pop("""images""" , UpperCAmelCase_ ) snake_case_ = kwargs.pop("""text""" , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: snake_case_ = args[0] snake_case_ = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: snake_case_ = self.image_processor(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ ) if text is not None: snake_case_ = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ ) if text is None: return inputs elif images is None: return encodings else: snake_case_ = encodings["""input_ids"""] return inputs def lowerCAmelCase ( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ) ->Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->str: """simple docstring""" return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @contextmanager def lowerCAmelCase ( self : List[str] ) ->Dict: """simple docstring""" warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) snake_case_ = True snake_case_ = self.tokenizer yield snake_case_ = self.image_processor snake_case_ = False def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : str=None ) ->Tuple: """simple docstring""" if added_vocab is None: snake_case_ = self.tokenizer.get_added_vocab() snake_case_ = {} while tokens: snake_case_ = re.search(R"""<s_(.*?)>""" , UpperCAmelCase_ , re.IGNORECASE ) if start_token is None: break snake_case_ = start_token.group(1 ) snake_case_ = re.search(RF"""</s_{key}>""" , UpperCAmelCase_ , re.IGNORECASE ) snake_case_ = start_token.group() if end_token is None: snake_case_ = tokens.replace(UpperCAmelCase_ , """""" ) else: snake_case_ = end_token.group() snake_case_ = re.escape(UpperCAmelCase_ ) snake_case_ = re.escape(UpperCAmelCase_ ) snake_case_ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCAmelCase_ , re.IGNORECASE ) if content is not None: snake_case_ = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node snake_case_ = self.tokenajson(UpperCAmelCase_ , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ ) if value: if len(UpperCAmelCase_ ) == 1: snake_case_ = value[0] snake_case_ = value else: # leaf nodes snake_case_ = [] for leaf in content.split(R"""<sep/>""" ): snake_case_ = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": snake_case_ = leaf[1:-2] # for categorical special tokens output[key].append(UpperCAmelCase_ ) if len(output[key] ) == 1: snake_case_ = output[key][0] snake_case_ = tokens[tokens.find(UpperCAmelCase_ ) + len(UpperCAmelCase_ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ ) if len(UpperCAmelCase_ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowerCAmelCase ( self : List[Any] ) ->Optional[int]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase_ , ) return self.image_processor_class @property def lowerCAmelCase ( self : List[str] ) ->str: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase_ , ) return self.image_processor
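The token-to-JSON logic above pairs <s_key> ... </s_key> spans and recurses into nested ones. A deliberately flat toy version of the same pairing idea (parse_flat is a made-up helper; it does not recurse, so nested tags come back as raw text):

import re

def parse_flat(tokens: str) -> dict:
    # Pair every <s_key> with its matching </s_key> and keep the text between.
    return {m.group(1): m.group(2).strip() for m in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens)}

print(parse_flat("<s_total>9.99</s_total><s_date>2023-06-01</s_date>"))
# {'total': '9.99', 'date': '2023-06-01'}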
233
0
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _UpperCamelCase : Optional[int] = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _UpperCamelCase : Any = concatenate_datasets _UpperCamelCase : Dict = DownloadConfig _UpperCamelCase : Dict = DownloadManager _UpperCamelCase : Dict = DownloadMode _UpperCamelCase : int = DownloadConfig _UpperCamelCase : Union[str, Any] = DownloadMode _UpperCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
77
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ : Any = logging.get_logger(__name__) snake_case_ : Dict = { "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json", } class __a (lowerCamelCase ): __a : Tuple = "roc_bert" def __init__( self : Union[str, Any] , __magic_name__ : List[str]=3_05_22 , __magic_name__ : Tuple=7_68 , __magic_name__ : Any=12 , __magic_name__ : Optional[Any]=12 , __magic_name__ : Union[str, Any]=30_72 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Any=5_12 , __magic_name__ : str=2 , __magic_name__ : Any=0.0_2 , __magic_name__ : Dict=1E-12 , __magic_name__ : int=True , __magic_name__ : Optional[int]=0 , __magic_name__ : str="absolute" , __magic_name__ : Tuple=None , __magic_name__ : Any=True , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=7_68 , __magic_name__ : List[Any]=9_10 , __magic_name__ : Tuple=5_12 , __magic_name__ : Dict=2_48_58 , __magic_name__ : Any=True , **__magic_name__ : Union[str, Any] , ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = vocab_size UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Dict = hidden_act UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Optional[Any] = type_vocab_size UpperCAmelCase_ : str = layer_norm_eps UpperCAmelCase_ : Tuple = use_cache UpperCAmelCase_ : Optional[int] = enable_pronunciation UpperCAmelCase_ : Union[str, Any] = enable_shape UpperCAmelCase_ : List[str] = pronunciation_embed_dim UpperCAmelCase_ : List[str] = pronunciation_vocab_size UpperCAmelCase_ : int = shape_embed_dim UpperCAmelCase_ : Optional[int] = shape_vocab_size UpperCAmelCase_ : Optional[Any] = concat_input UpperCAmelCase_ : Dict = position_embedding_type UpperCAmelCase_ : Union[str, Any] = classifier_dropout super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
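The class above is mostly hyperparameter bookkeeping on top of PretrainedConfig. A minimal sketch of the same pattern with a toy subclass (ToyConfig and its fields are illustrative, not part of the library):

from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size=768, num_hidden_layers=12, pad_token_id=0, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        super().__init__(pad_token_id=pad_token_id, **kwargs)

cfg = ToyConfig(hidden_size=256)
print(cfg.to_dict()["hidden_size"])  # 256; to_dict/from_dict give JSON round-tripping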
125
0
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
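A compact, self-contained numeric check of the bottom-up recurrence, on the same instance as the driver block (weights [4, 3, 2, 3], values [3, 2, 4, 4], capacity 6; the optimum of 8 takes items 3 and 4):

def knapsack_value(capacity: int, weights: list[int], values: list[int]) -> int:
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for cap in range(1, capacity + 1):
            dp[i][cap] = dp[i - 1][cap]  # skip item i
            if weights[i - 1] <= cap:    # or take it, if it fits
                dp[i][cap] = max(dp[i][cap], values[i - 1] + dp[i - 1][cap - weights[i - 1]])
    return dp[n][capacity]

assert knapsack_value(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8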
65
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _lowerCamelCase ( UpperCamelCase ): """simple docstring""" snake_case = ["image_processor"] snake_case = "SamImageProcessor" def __init__( self , _SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' super().__init__(_SCREAMING_SNAKE_CASE ) A_ : Any = self.image_processor A_ : Optional[int] = -10 A_ : List[Any] = self.image_processor.size['''longest_edge'''] def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->BatchEncoding: '''simple docstring''' A_ : Union[str, Any] = self.image_processor( _SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) # pop arguments that are not used in the foward but used nevertheless A_ : Tuple = encoding_image_processor['''original_sizes'''] if hasattr(_SCREAMING_SNAKE_CASE , '''numpy''' ): # Checks if Torch or TF tensor A_ : int = original_sizes.numpy() A_ , A_ , A_ : str = self._check_and_preprocess_points( input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , input_boxes=_SCREAMING_SNAKE_CASE , ) A_ : Optional[Any] = self._normalize_and_convert( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , ) return encoding_image_processor def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="pt" , )->Dict: '''simple docstring''' if input_points is not None: if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): A_ : Optional[Any] = [ self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , original_sizes[0] ) for point in input_points ] else: A_ : str = [ self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for point, original_size in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: A_ , A_ : Optional[Any] = self._pad_points_and_labels(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) A_ : List[str] = np.array(_SCREAMING_SNAKE_CASE ) if input_labels is not None: A_ : Dict = np.array(_SCREAMING_SNAKE_CASE ) if input_boxes is not None: if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): A_ : Tuple = [ self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , original_sizes[0] , is_bounding_box=_SCREAMING_SNAKE_CASE ) for box in input_boxes ] else: A_ : List[Any] = [ self._normalize_coordinates(self.target_size , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , is_bounding_box=_SCREAMING_SNAKE_CASE ) for box, original_size in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] A_ : Union[str, Any] = np.array(_SCREAMING_SNAKE_CASE ) if input_boxes is not None: if return_tensors == "pt": A_ : Dict = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default A_ : Optional[Any] = input_boxes.unsqueeze(1 ) if 
len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": A_ : Optional[int] = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) # boxes batch size of 1 by default A_ : List[Any] = tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'''input_boxes''': input_boxes} ) if input_points is not None: if return_tensors == "pt": A_ : Union[str, Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # point batch size of 1 by default A_ : Union[str, Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": A_ : List[str] = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) # point batch size of 1 by default A_ : Union[str, Any] = tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'''input_points''': input_points} ) if input_labels is not None: if return_tensors == "pt": A_ : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # point batch size of 1 by default A_ : List[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": A_ : int = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) # point batch size of 1 by default A_ : List[Any] = tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'''input_labels''': input_labels} ) return encoding_image_processor def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict: '''simple docstring''' A_ : Optional[Any] = max([point.shape[0] for point in input_points] ) A_ : int = [] for i, point in enumerate(_SCREAMING_SNAKE_CASE ): if point.shape[0] != expected_nb_points: A_ : Optional[int] = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) A_ : int = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = processed_input_points return input_points, input_labels def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->np.ndarray: '''simple docstring''' A_ , A_ : str = original_size A_ , A_ : Dict = self.image_processor._get_preprocess_shape(_SCREAMING_SNAKE_CASE , longest_edge=_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = deepcopy(_SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE ) if is_bounding_box: A_ : Union[str, Any] = coords.reshape(-1 , 2 , 2 ) A_ : Any = coords[..., 0] * (new_w / old_w) A_ : List[str] = coords[..., 1] * (new_h / old_h) if is_bounding_box: A_ : str = coords.reshape(-1 , 4 ) return coords def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , )->str: '''simple docstring''' if input_points is not None: if hasattr(_SCREAMING_SNAKE_CASE , '''numpy''' ): # Checks for TF or Torch tensor A_ : List[str] = input_points.numpy().tolist() if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0] , _SCREAMING_SNAKE_CASE ): raise ValueError('''Input points must be a list of list of floating points.''' ) A_ : Optional[Any] = [np.array(_SCREAMING_SNAKE_CASE ) for input_point in input_points] else: A_ : Tuple = None if input_labels is not None: if hasattr(_SCREAMING_SNAKE_CASE , '''numpy''' ): A_ : Dict = input_labels.numpy().tolist() if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not 
isinstance(input_labels[0] , _SCREAMING_SNAKE_CASE ): raise ValueError('''Input labels must be a list of list integers.''' ) A_ : Union[str, Any] = [np.array(_SCREAMING_SNAKE_CASE ) for label in input_labels] else: A_ : str = None if input_boxes is not None: if hasattr(_SCREAMING_SNAKE_CASE , '''numpy''' ): A_ : str = input_boxes.numpy().tolist() if ( not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0] , _SCREAMING_SNAKE_CASE ) or not isinstance(input_boxes[0][0] , _SCREAMING_SNAKE_CASE ) ): raise ValueError('''Input boxes must be a list of list of list of floating points.''' ) A_ : Tuple = [np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes] else: A_ : Dict = None return input_points, input_labels, input_boxes @property def _snake_case ( self )->List[str]: '''simple docstring''' A_ : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(_SCREAMING_SNAKE_CASE ) ) def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' return self.image_processor.post_process_masks(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
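The _normalize_coordinates step above maps point and box prompts from the original image frame into the resized frame. A self-contained sketch of that mapping (assumes SAM's usual longest edge of 1024 and the same int(x + 0.5) resize shape as _get_preprocess_shape):

import numpy as np

def rescale_points(points, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    pts = np.asarray(points, dtype=np.float64)
    pts[..., 0] *= new_w / old_w  # x coordinates
    pts[..., 1] *= new_h / old_h  # y coordinates
    return pts

print(rescale_points([[500, 375]], (750, 1000)))  # [[512. 384.]]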
65
1
def UpperCAmelCase_(txt: str) -> list:
    # One copy of `txt` per alphabetic position, with that character upper-cased.
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
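The same comprehension under a descriptive stand-in name, with a quick check that non-alphabetic positions are skipped:

def capitalize_each_letter_once(txt: str) -> list[str]:
    return [txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(txt)) if txt[a].isalpha()]

assert capitalize_each_letter_once("a1b") == ["A1b", "a1B"]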
225
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCamelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Union[str, Any]=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Optional[int] ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE_ = LlamaModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) SCREAMING_SNAKE_CASE_ = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , ): SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = LlamaForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # first forward pass SCREAMING_SNAKE_CASE_ = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , ) SCREAMING_SNAKE_CASE_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE_ = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , 
output_hidden_states=_lowerCAmelCase , )['hidden_states'][0] SCREAMING_SNAKE_CASE_ = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowercase_ = (LlamaForCausalLM,) if is_torch_available() else () lowercase_ = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = LlamaModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : List[Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 'single_label_classification' SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase ) 
SCREAMING_SNAKE_CASE_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self : List[Any] ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = 3 SCREAMING_SNAKE_CASE_ = 'multi_label_classification' SCREAMING_SNAKE_CASE_ = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids.ne(1 ).to(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE_ = LlamaForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def lowerCAmelCase_ ( self : int ): pass @parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Tuple ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = ids_tensor([1, 10] , config.vocab_size ) SCREAMING_SNAKE_CASE_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase ) original_model.to(_lowerCAmelCase ) original_model.eval() SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE_ = {'type': scaling_type, 'factor': 10.0} SCREAMING_SNAKE_CASE_ = LlamaModel(_lowerCAmelCase ) scaled_model.to(_lowerCAmelCase ) scaled_model.eval() SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) SCREAMING_SNAKE_CASE_ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 SCREAMING_SNAKE_CASE_ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) ) # Expected mean on dim = -1 SCREAMING_SNAKE_CASE_ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) ) # Expected mean on dim = -1 SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off SCREAMING_SNAKE_CASE_ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Model is curently gated' ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that ' SCREAMING_SNAKE_CASE_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowerCAmelCase , return_tensors='pt' ) SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_lowerCAmelCase ) # greedy generation outputs SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
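The logits comparisons above all go through torch.testing.assert_close, which checks |actual - expected| <= atol + rtol * |expected| element-wise. A tiny self-contained illustration:

import torch

expected = torch.tensor([1.0, 2.0])
actual = torch.tensor([1.0, 2.00001])
torch.testing.assert_close(actual, expected, atol=1e-4, rtol=1e-4)  # passes: 1e-5 <= 1e-4 + 2e-4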
225
1
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset A_ : Union[str, Any] = random.Random() def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_=1.0 , lowerCAmelCase_=None , lowerCAmelCase_=None )-> Dict: '''simple docstring''' if rng is None: _UpperCAmelCase : List[str] = global_rng _UpperCAmelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self ,a_ ,a_=7 ,a_=400 ,a_=2_000 ,a_=2_048 ,a_=128 ,a_=1 ,a_=512 ,a_=30 ,a_=44_100 ,) -> str: _UpperCAmelCase : List[str] = parent _UpperCAmelCase : int = batch_size _UpperCAmelCase : Optional[Any] = min_seq_length _UpperCAmelCase : Dict = max_seq_length _UpperCAmelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase : Tuple = spectrogram_length _UpperCAmelCase : Tuple = feature_size _UpperCAmelCase : Union[str, Any] = num_audio_channels _UpperCAmelCase : Optional[int] = hop_length _UpperCAmelCase : List[str] = chunk_length _UpperCAmelCase : str = sampling_rate def _snake_case ( self ) -> List[str]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def _snake_case ( self ,a_=False ,a_=False ) -> int: def _flatten(a_ ): return list(itertools.chain(*lowerCAmelCase_ ) ) if equal_length: _UpperCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: _UpperCAmelCase : Tuple = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowercase ( __lowerCAmelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase = TvltFeatureExtractor def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : List[Any] = TvltFeatureExtractionTester(self ) def _snake_case ( self ) -> Dict: _UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCAmelCase_ ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ ,"""num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCAmelCase_ ,"""sampling_rate""" ) ) def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase : Optional[Any] = feat_extract_first.save_pretrained(lowerCAmelCase_ )[0] check_json_file_has_correct_format(lowerCAmelCase_ ) _UpperCAmelCase : 
Optional[Any] = self.feature_extraction_class.from_pretrained(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = feat_extract_first.to_dict() _UpperCAmelCase : Dict = feat_extract_second.to_dict() _UpperCAmelCase : Any = dict_first.pop("""mel_filters""" ) _UpperCAmelCase : List[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ) ) self.assertEqual(lowerCAmelCase_ ,lowerCAmelCase_ ) def _snake_case ( self ) -> Optional[int]: _UpperCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase : Union[str, Any] = os.path.join(lowerCAmelCase_ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCAmelCase_ ) _UpperCAmelCase : Dict = self.feature_extraction_class.from_json_file(lowerCAmelCase_ ) _UpperCAmelCase : List[str] = feat_extract_first.to_dict() _UpperCAmelCase : Any = feat_extract_second.to_dict() _UpperCAmelCase : Dict = dict_first.pop("""mel_filters""" ) _UpperCAmelCase : List[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCAmelCase_ ,lowerCAmelCase_ ) ) self.assertEqual(lowerCAmelCase_ ,lowerCAmelCase_ ) def _snake_case ( self ) -> Dict: # Initialize feature_extractor _UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )] _UpperCAmelCase : Optional[int] = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs] # Test not batched input _UpperCAmelCase : Dict = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44_100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _UpperCAmelCase : Optional[Any] = feature_extractor(lowerCAmelCase_ ,return_tensors="""np""" ,sampling_rate=44_100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _UpperCAmelCase : int = feature_extractor( lowerCAmelCase_ ,return_tensors="""np""" ,sampling_rate=44_100 ,mask_audio=lowerCAmelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase : Tuple = np.asarray(lowerCAmelCase_ ) _UpperCAmelCase : Optional[int] = feature_extractor(lowerCAmelCase_ ,return_tensors="""np""" ,sampling_rate=44_100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def _snake_case ( self ,a_ ) -> Optional[Any]: _UpperCAmelCase : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech _UpperCAmelCase : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCAmelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def _snake_case ( self ) -> int: _UpperCAmelCase : int = self._load_datasamples(1 ) _UpperCAmelCase : List[Any] = TvltFeatureExtractor() _UpperCAmelCase : Tuple = feature_extractor(lowerCAmelCase_ ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) _UpperCAmelCase : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCAmelCase_ ,atol=1E-4 ) )
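The to_json_file/from_json_file test above boils down to a config round-trip through disk. A dependency-free sketch of that contract:

import json
import os
import tempfile

config = {"feature_size": 128, "sampling_rate": 44_100, "hop_length": 512}
with tempfile.TemporaryDirectory() as tmpdirname:
    path = os.path.join(tmpdirname, "feat_extract.json")
    with open(path, "w") as f:
        json.dump(config, f)
    with open(path) as f:
        reloaded = json.load(f)
assert reloaded == config  # serialization must preserve every field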
365
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # A weight-0 edge does not increase the distance: expand it first.
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
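A short usage sketch for the 0-1 BFS above, using the Edge/AdjacencyList classes it defines (weight-0 edges go to the front of the deque, so they are expanded at the same distance):

g = AdjacencyList(4)
g.add_edge(0, 1, 1)  # weight-1 edge: appended to the back
g.add_edge(1, 2, 0)  # weight-0 edge: pushed to the front, same distance
g.add_edge(2, 3, 1)
print(g.get_shortest_path(0, 3))  # 2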
349
0
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=4, ): '''simple docstring''' _lowerCAmelCase : List[Any] = parent _lowerCAmelCase : Dict = batch_size _lowerCAmelCase : Optional[Any] = seq_length _lowerCAmelCase : Optional[int] = is_training _lowerCAmelCase : Dict = use_attention_mask _lowerCAmelCase : List[str] = use_token_type_ids _lowerCAmelCase : Union[str, Any] = use_labels _lowerCAmelCase : List[Any] = vocab_size _lowerCAmelCase : Union[str, Any] = hidden_size _lowerCAmelCase : str = num_hidden_layers _lowerCAmelCase : Optional[int] = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : Union[str, Any] = hidden_act _lowerCAmelCase : Any = hidden_dropout_prob _lowerCAmelCase : Any = attention_probs_dropout_prob _lowerCAmelCase : str = max_position_embeddings _lowerCAmelCase : Dict = type_vocab_size _lowerCAmelCase : Union[str, Any] = type_sequence_label_size _lowerCAmelCase : str = initializer_range _lowerCAmelCase : List[Any] = num_choices def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowerCAmelCase : int = None if self.use_attention_mask: _lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCAmelCase : Any = None if self.use_token_type_ids: _lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) _lowerCAmelCase : Dict = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__a, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = config_and_inputs _lowerCAmelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = config_and_inputs _lowerCAmelCase : Tuple = True _lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( 
config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = True lowerCamelCase__ = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, ) if is_flax_available() else () ) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : str = FlaxBertModelTester(self) @slow def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = FlaxBertModel.from_pretrained("bert-base-cased") _lowerCAmelCase : Dict = model(np.ones((1, 1))) self.assertIsNotNone(__a)
36
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
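The CTRL `__init__` above (like the MaskFormer and ConvNext ones later in this dump) follows the lazy-import pattern: public names are declared up front in `_import_structure`, and the heavy torch/TF submodules are imported only when an attribute is first touched. A minimal sketch of the idea, assuming a simplified stand-in for `_LazyModule` (the real transformers implementation also handles module specs, extra objects, and import errors):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported object back to the submodule that defines it
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        return getattr(submodule, attr)

Deferring the backend imports keeps top-level package import cheap even when the optional dependencies are installed.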
337
0
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : str ): """simple docstring""" _snake_case : str = int(snake_case__ ) # Initialize Result _snake_case : Dict = [] # Traverse through all denomination for denomination in reversed(snake_case__ ): # Find denominations while int(snake_case__ ) >= int(snake_case__ ): total_value -= int(snake_case__ ) answer.append(snake_case__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": A_ = [] A_ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): A_ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) A_ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter A_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] A_ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F'''Following is minimal change for {value}: ''') A_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
132
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''MaskFormerFeatureExtractor'''] A_ = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] A_ = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
132
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> str: __UpperCamelCase =tempfile.mkdtemp() # fmt: off __UpperCamelCase =['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) ) __UpperCamelCase =['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] __UpperCamelCase ={'unk_token': '<unk>'} __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) __UpperCamelCase ={ 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } __UpperCamelCase =os.path.join(self.tmpdirname , A_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(A_ , A_ ) def _a ( self , **A_ ) -> List[Any]: return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Any: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def _a ( self , **A_ ) -> Optional[Any]: return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A_ ) def _a ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def _a ( self ) -> Tuple: __UpperCamelCase =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __UpperCamelCase =[Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self ) -> List[str]: __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_rust_tokenizer() __UpperCamelCase =self.get_image_processor() __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) processor_slow.save_pretrained(self.tmpdirname ) __UpperCamelCase =CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A_ ) __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) processor_fast.save_pretrained(self.tmpdirname ) __UpperCamelCase =CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , A_ ) self.assertIsInstance(processor_fast.tokenizer , A_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , A_ ) self.assertIsInstance(processor_fast.image_processor , A_ ) def _a ( self ) -> Optional[Any]: 
__UpperCamelCase =CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __UpperCamelCase =self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) __UpperCamelCase =CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def _a ( self ) -> Tuple: __UpperCamelCase =self.get_image_processor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) __UpperCamelCase =self.prepare_image_inputs() __UpperCamelCase =image_processor(A_ , return_tensors='np' ) __UpperCamelCase =processor(images=A_ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase =self.get_image_processor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) __UpperCamelCase ='lower newer' __UpperCamelCase =processor(text=A_ ) __UpperCamelCase =tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self ) -> List[Any]: __UpperCamelCase =self.get_image_processor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) __UpperCamelCase ='lower newer' __UpperCamelCase =self.prepare_image_inputs() __UpperCamelCase =processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def _a ( self ) -> Union[str, Any]: __UpperCamelCase =self.get_image_processor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) __UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __UpperCamelCase =processor.batch_decode(A_ ) __UpperCamelCase =tokenizer.batch_decode(A_ ) self.assertListEqual(A_ , A_ ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase =self.get_image_processor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =CLIPProcessor(tokenizer=A_ , image_processor=A_ ) __UpperCamelCase ='lower newer' __UpperCamelCase =self.prepare_image_inputs() __UpperCamelCase =processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
62
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , ): if attention_mask is None: __lowercase : Tuple = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowercase : Any = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowercase : Any = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCAmelCase_ ) if decoder_head_mask is None: __lowercase : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCAmelCase_ ) if cross_attn_head_mask is None: __lowercase : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCAmelCase_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class lowerCAmelCase : '''simple docstring''' def __init__( self : Union[str, Any] , __a : str , __a : Tuple=13 , __a : List[Any]=7 , __a : Any=True , __a : List[str]=False , __a : Optional[Any]=99 , __a : Tuple=16 , __a : int=2 , __a : Optional[Any]=4 , __a : int=4 , __a : Any="relu" , __a : Optional[int]=0.1 , __a : List[str]=0.1 , __a : Dict=0.0 , __a : List[str]=0.0 , __a : Union[str, Any]=20 , __a : str=2 , __a : str=1 , __a : Optional[int]=0 , ) -> Optional[int]: """simple docstring""" __lowercase : Any = parent __lowercase : Tuple = batch_size __lowercase : Any = seq_length __lowercase : Tuple = is_training __lowercase : Optional[Any] = use_labels __lowercase : Dict = vocab_size __lowercase : Optional[Any] = hidden_size __lowercase : Dict = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Dict = intermediate_size __lowercase : Union[str, Any] = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : Tuple = attention_probs_dropout_prob __lowercase : Tuple = encoder_layerdrop __lowercase : List[str] = decoder_layerdrop __lowercase : Any = max_position_embeddings __lowercase : Any = eos_token_id __lowercase : Dict = pad_token_id __lowercase : List[str] = bos_token_id def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : str = self.eos_token_id # Eos Token __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad 
tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_length and which in turn results in # position_ids being off by num_pad_tokens in past input __lowercase : Tuple = input_ids.clamp(self.pad_token_id + 1 ) __lowercase : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowercase : List[str] = self.get_config() __lowercase : str = prepare_mam_aaa_inputs_dict(__a , __a , __a ) return config, inputs_dict def lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" __lowercase , __lowercase : int = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase ( self : int , __a : str , __a : str ) -> int: """simple docstring""" __lowercase : Optional[Any] = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval() __lowercase : List[str] = inputs_dict["""input_ids"""] __lowercase : Dict = inputs_dict["""attention_mask"""] __lowercase : List[Any] = inputs_dict["""head_mask"""] # first forward pass __lowercase : List[str] = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) __lowercase , __lowercase : str = outputs.to_tuple() # create hypothetical multiple next tokens and extend to next_input_ids __lowercase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowercase : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __lowercase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowercase : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __lowercase : Optional[int] = model(__a , attention_mask=__a )["""last_hidden_state"""] __lowercase : Union[str, Any] = model(__a , attention_mask=__a , past_key_values=__a )[ """last_hidden_state""" ] # select random slice __lowercase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase : int = output_from_no_past[:, -3:, random_slice_idx].detach() __lowercase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-2 ) ) def lowerCAmelCase ( self : List[str] , __a : Tuple , __a : List[str] ) -> str: """simple docstring""" __lowercase : Dict = MaMaaaModel(config=__a ).to(__a ).eval() __lowercase : Any = model(**__a ) __lowercase : str = outputs.encoder_last_hidden_state __lowercase : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __lowercase : str = model.get_encoder() encoder.save_pretrained(__a ) __lowercase : List[Any] = MaMaaaEncoder.from_pretrained(__a
).to(__a ) __lowercase : Tuple = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: __lowercase : Tuple = model.get_decoder() decoder.save_pretrained(__a ) __lowercase : Tuple = MaMaaaDecoder.from_pretrained(__a ).to(__a ) __lowercase : Tuple = decoder( input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): '''simple docstring''' _A : int = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) _A : int = (MaMaaaForConditionalGeneration,) if is_torch_available() else () _A : Union[str, Any] = ( { '''conversational''': MaMaaaForConditionalGeneration, '''feature-extraction''': MaMaaaModel, '''summarization''': MaMaaaForConditionalGeneration, '''text2text-generation''': MaMaaaForConditionalGeneration, '''translation''': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) _A : Optional[int] = True _A : Union[str, Any] = True _A : Any = False _A : int = False def lowerCAmelCase ( self : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[Any] , __a : Tuple , __a : Optional[int] ) -> Any: """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase : int = MaMaaaModelTester(self ) __lowercase : Union[str, Any] = ConfigTester(self , config_class=__a ) def lowerCAmelCase ( self : Any ) -> str: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" __lowercase , __lowercase : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __lowercase : List[str] = model_class(__a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a ) __lowercase , __lowercase : str = model_class.from_pretrained(__a , output_loading_info=__a ) self.assertEqual(info["""missing_keys"""] , [] ) def lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): __lowercase : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() __lowercase : Any = copy.deepcopy(self._prepare_for_class(__a , __a ) ) if not self.is_encoder_decoder: __lowercase : int = inputs["""input_ids"""] del inputs["input_ids"] else: __lowercase : Optional[int] = inputs["""input_ids"""] __lowercase : Optional[Any] = inputs.get("""decoder_input_ids""" , __a ) del inputs["input_ids"] inputs.pop("""decoder_input_ids""" , __a ) __lowercase : Union[str, Any] = model.get_input_embeddings() if not self.is_encoder_decoder: __lowercase : Dict = wte(__a ) else: __lowercase : str = wte(__a ) __lowercase : Union[str, Any] = wte(__a ) with torch.no_grad(): model(**__a )[0] def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs() __lowercase : List[str] = input_dict["""input_ids"""] __lowercase : Optional[int] = input_ids.ne(1 ).to(__a ) __lowercase : List[Any] = MaMaaaForConditionalGeneration(__a ).eval().to(__a ) if torch_device == "cuda": model.half() model.generate(__a , attention_mask=__a ) model.generate(num_beams=4 , do_sample=__a , early_stopping=__a , num_return_sequences=3 ) def snake_case_ ( lowerCAmelCase_ : Optional[Any] ): return torch.tensor(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_ ) lowerCamelCase : Dict = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" ) def lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" __lowercase : List[str] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__a ) __lowercase : Union[str, Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) __lowercase : int = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) __lowercase : int = 
prepare_mam_aaa_inputs_dict(model.config , __a , __a ) with torch.no_grad(): __lowercase : int = model(**__a )[0] __lowercase : int = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , __a ) # change to expected output here __lowercase : Dict = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__a ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) ) def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a ) # change to intended input __lowercase : Any = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] ) __lowercase : Union[str, Any] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] ) __lowercase : Tuple = prepare_mam_aaa_inputs_dict(model.config , __a , __a ) with torch.no_grad(): __lowercase : Optional[Any] = model(**__a )[0] __lowercase : Tuple = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , __a ) # change to expected output here __lowercase : Union[str, Any] = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__a ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) ) def lowerCAmelCase ( self : Optional[int] ) -> int: """simple docstring""" __lowercase : List[str] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a ) __lowercase : Tuple = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" ) __lowercase : Dict = [ """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent""" """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de""" """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""", ] # The below article tests that we don't add any hypotheses outside of the top n_beams __lowercase : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors="""pt""" ) __lowercase : Dict = model.generate( input_ids=dct["""input_ids"""].to(__a ) , attention_mask=dct["""attention_mask"""].to(__a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , ) __lowercase : Any = [ """The NSA case highlights the total absence of intelligence debate""", """I think there are two levels of response from the French government.""", """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.""" """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all""" """ communications in France.""", ] __lowercase : Dict = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__a , skip_special_tokens=__a ) assert generated == expected_en
233
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _snake_case = { "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["ConvNextFeatureExtractor"] _snake_case = ["ConvNextImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvNextForImageClassification", "ConvNextModel", "ConvNextPreTrainedModel", "ConvNextBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "TFConvNextForImageClassification", "TFConvNextModel", "TFConvNextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
300
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _snake_case = False try: _snake_case = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class UpperCAmelCase_ : def __init__( self, __a = None, __a = []): '''simple docstring''' _lowerCAmelCase : Optional[int] = 0 _lowerCAmelCase : Optional[int] = choices _lowerCAmelCase : Tuple = prompt if sys.platform == "win32": _lowerCAmelCase : Optional[Any] = "*" else: _lowerCAmelCase : Dict = "➔ " def snake_case__ ( self, __a, __a = ""): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index], 32, __a) else: forceWrite(self.choices[index], __a) def snake_case__ ( self, __a): '''simple docstring''' if index == self.position: forceWrite(f" {self.arrow_char} ") self.write_choice(__a) else: forceWrite(f" {self.choices[index]}") reset_cursor() def snake_case__ ( self, __a, __a = 1): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(__a) move_cursor(__a, direction.name) self.print_choice(self.position) @input.mark(KEYMAP["up"]) def snake_case__ ( self): '''simple docstring''' self.move_direction(Direction.UP) @input.mark(KEYMAP["down"]) def snake_case__ ( self): '''simple docstring''' self.move_direction(Direction.DOWN) @input.mark(KEYMAP["newline"]) def snake_case__ ( self): '''simple docstring''' move_cursor(len(self.choices) - self.position, "DOWN") return self.position @input.mark(KEYMAP["interrupt"]) def snake_case__ ( self): '''simple docstring''' move_cursor(len(self.choices) - self.position, "DOWN") raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(__a)] for number in range(10)]) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : str = int(chr(self.current_selection)) _lowerCAmelCase : List[str] = index - self.position if index == self.position: return if index < len(self.choices): if self.position > index: self.move_direction(Direction.UP, -movement) elif self.position < index: self.move_direction(Direction.DOWN, __a) else: return else: return def snake_case__ ( self, __a = 0): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt, "\n") if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter", "\n") else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") _lowerCAmelCase : List[Any] = default_choice for i in range(len(self.choices)): self.print_choice(__a) forceWrite("\n") move_cursor(len(self.choices) - self.position, "UP") with cursor.hide(): while True: if in_colab: try: _lowerCAmelCase : str = int(builtins.input()) except ValueError: _lowerCAmelCase : List[Any] = default_choice else: _lowerCAmelCase : List[str] = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices) + 1): move_cursor(1, "UP") clear_line() self.write_choice(__a, "\n") return choice
300
1
import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCamelCase__ = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n' UpperCamelCase__ = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n' UpperCamelCase__ = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n' def lowerCAmelCase_ ( __A, __A ) -> Dict: '''simple docstring''' return float((preds == labels).mean() ) def lowerCAmelCase_ ( __A, __A ) -> List[str]: '''simple docstring''' UpperCAmelCase__ = simple_accuracy(__A, __A ) UpperCAmelCase__ = float(fa_score(y_true=__A, y_pred=__A ) ) return { "accuracy": acc, "f1": fa, } def lowerCAmelCase_ ( __A, __A ) -> Any: '''simple docstring''' UpperCAmelCase__ = np.array(__A ) UpperCAmelCase__ = np.array(__A ) UpperCAmelCase__ = en_sentvecs.shape[0] # mean centering UpperCAmelCase__ = en_sentvecs - np.mean(__A, axis=0 ) UpperCAmelCase__ = in_sentvecs - np.mean(__A, axis=0 ) UpperCAmelCase__ = cdist(__A, __A, "cosine" ) UpperCAmelCase__ = np.array(range(__A ) ) UpperCAmelCase__ = sim.argsort(axis=1 )[:, :10] UpperCAmelCase__ = np.any(preds == actual[:, None], axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def lowercase_ (self : Optional[Any] ) -> List[str]: """simple docstring""" if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", 
\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def lowercase_ (self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) -> List[Any]: """simple docstring""" if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(__UpperCAmelCase , __UpperCAmelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(__UpperCAmelCase , __UpperCAmelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
65
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class A ( UpperCAmelCase_ ): __UpperCAmelCase : torch.FloatTensor __UpperCAmelCase : Optional[torch.FloatTensor] = None def lowerCAmelCase_ ( __A, __A=0.999, __A="cosine", ) -> Tuple: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__A ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase__ = [] for i in range(__A ): UpperCAmelCase__ = i / num_diffusion_timesteps UpperCAmelCase__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ), __A ) ) return torch.tensor(__A, dtype=torch.floataa ) class A ( UpperCAmelCase_ , UpperCAmelCase_ ): @register_to_config def __init__(self : List[str] , __UpperCAmelCase : int = 1_0_0_0 , __UpperCAmelCase : str = "fixed_small_log" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[float] = 1.0 , __UpperCAmelCase : str = "epsilon" , __UpperCAmelCase : str = "squaredcos_cap_v2" , ) -> Optional[int]: """simple docstring""" if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) UpperCAmelCase__ = betas_for_alpha_bar(__UpperCAmelCase ) UpperCAmelCase__ = 1.0 - self.betas UpperCAmelCase__ = torch.cumprod(self.alphas , dim=0 ) UpperCAmelCase__ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCAmelCase__ = 1.0 # setable values UpperCAmelCase__ = None UpperCAmelCase__ = torch.from_numpy(np.arange(0 , __UpperCAmelCase )[::-1].copy() ) UpperCAmelCase__ = variance_type def lowercase_ (self : List[str] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor: """simple docstring""" return sample def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, torch.device] = None ) -> Any: """simple docstring""" UpperCAmelCase__ = num_inference_steps UpperCAmelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCAmelCase__ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase ) def lowercase_ (self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]=None ) -> Tuple: """simple docstring""" if prev_timestep is None: UpperCAmelCase__ = t - 1 UpperCAmelCase__ = self.alphas_cumprod[t] UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase__ = 1 - alpha_prod_t UpperCAmelCase__ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase__ = self.betas[t] else: UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ = beta_prod_t_prev / beta_prod_t * 
beta if variance_type is None: UpperCAmelCase__ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": UpperCAmelCase__ = torch.log(torch.clamp(__UpperCAmelCase , min=1E-20 ) ) UpperCAmelCase__ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCAmelCase__ = variance.log() UpperCAmelCase__ = beta.log() UpperCAmelCase__ = (predicted_variance + 1) / 2 UpperCAmelCase__ = frac * max_log + (1 - frac) * min_log return variance def lowercase_ (self : Optional[int] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]: """simple docstring""" UpperCAmelCase__ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCAmelCase__ , UpperCAmelCase__ = torch.split(__UpperCAmelCase , sample.shape[1] , dim=1 ) else: UpperCAmelCase__ = None # 1. compute alphas, betas if prev_timestep is None: UpperCAmelCase__ = t - 1 UpperCAmelCase__ = self.alphas_cumprod[t] UpperCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCAmelCase__ = 1 - alpha_prod_t UpperCAmelCase__ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCAmelCase__ = self.betas[t] UpperCAmelCase__ = self.alphas[t] else: UpperCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev UpperCAmelCase__ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ = torch.clamp( __UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCAmelCase__ = 0 if t > 0: UpperCAmelCase__ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=__UpperCAmelCase , device=model_output.device ) UpperCAmelCase__ = self._get_variance( __UpperCAmelCase , predicted_variance=__UpperCAmelCase , prev_timestep=__UpperCAmelCase , ) if self.variance_type == "fixed_small_log": UpperCAmelCase__ = variance elif self.variance_type == "learned_range": UpperCAmelCase__ = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" " for the UnCLIPScheduler." 
) UpperCAmelCase__ = variance * variance_noise UpperCAmelCase__ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__UpperCAmelCase , pred_original_sample=__UpperCAmelCase ) def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.IntTensor , ) -> torch.FloatTensor: """simple docstring""" UpperCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCAmelCase__ = timesteps.to(original_samples.device ) UpperCAmelCase__ = alphas_cumprod[timesteps] ** 0.5 UpperCAmelCase__ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 ) UpperCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCAmelCase__ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
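The scheduler above only accepts `beta_schedule="squaredcos_cap_v2"`, which `betas_for_alpha_bar` builds from the cosine `alpha_bar`. A standalone sketch of that schedule (function and parameter names are mine; the formula is the one in the code): beta_t = 1 - alpha_bar((t+1)/T) / alpha_bar(t/T), capped at max_beta.

import math


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas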
65
1
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise records into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 inverts the score, weight 1 keeps it."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
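A worked example for the pipeline above, using hypothetical vehicle rows of (price, mileage, registration year); weight 0 rewards small values, weight 1 rewards large ones:

vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
print(procentual_proximity(vehicles, [0, 0, 1]))
# [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]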
367
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Dict = logging.get_logger(__name__) __a :int = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = 'realm' def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) # Common config A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = retriever_proj_size A_ = num_hidden_layers A_ = num_attention_heads A_ = num_candidates A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = initializer_range A_ = type_vocab_size A_ = layer_norm_eps # Reader config A_ = span_hidden_size A_ = max_span_width A_ = reader_layer_norm_eps A_ = reader_beam_size A_ = reader_seq_len # Retrieval config A_ = num_block_records A_ = searcher_beam_size
329
0
"""simple docstring""" from __future__ import annotations import numpy as np def _A ( lowercase ): """simple docstring""" a , a =np.shape(lowercase ) if rows != columns: a =( '''\'table\' has to be of square shaped array but got a ''' f'''{rows}x{columns} array:\n{table}''' ) raise ValueError(lowercase ) a =np.zeros((rows, columns) ) a =np.zeros((rows, columns) ) for i in range(lowercase ): for j in range(lowercase ): a =sum(lower[i][k] * upper[k][j] for k in range(lowercase ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) a =(table[i][j] - total) / upper[j][j] a =1 for j in range(lowercase , lowercase ): a =sum(lower[i][k] * upper[k][j] for k in range(lowercase ) ) a =table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
81
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : Dict = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class UpperCAmelCase__ ( UpperCAmelCase_): __SCREAMING_SNAKE_CASE = '''gptj''' __SCREAMING_SNAKE_CASE = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase=5_0_4_0_0 , lowercase=2_0_4_8 , lowercase=4_0_9_6 , lowercase=2_8 , lowercase=1_6 , lowercase=6_4 , lowercase=None , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=5_0_2_5_6 , lowercase=5_0_2_5_6 , lowercase=False , **lowercase , ) -> Tuple: __UpperCamelCase = vocab_size __UpperCamelCase = n_positions __UpperCamelCase = n_embd __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = n_inner __UpperCamelCase = rotary_dim __UpperCamelCase = activation_function __UpperCamelCase = resid_pdrop __UpperCamelCase = embd_pdrop __UpperCamelCase = attn_pdrop __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id super().__init__( bos_token_id=lowercase , eos_token_id=lowercase , tie_word_embeddings=lowercase , **lowercase ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase = "default" , lowercase = None , lowercase = False , ) -> List[str]: super().__init__(lowercase , task=lowercase , patching_specs=lowercase , use_past=lowercase ) if not getattr(self._config , """pad_token_id""" , lowercase ): # TODO: how to do that better? 
__UpperCamelCase = 0 @property def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: __UpperCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="""inputs""" ) __UpperCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def __lowerCamelCase ( self ) -> int: return self._config.n_layer @property def __lowerCamelCase ( self ) -> int: return self._config.n_head def __lowerCamelCase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]: __UpperCamelCase = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs["""attention_mask"""] if self.use_past: __UpperCamelCase = ordered_inputs["""attention_mask"""].dtype __UpperCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def __lowerCamelCase ( self ) -> int: return 1_3
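The dummy `past_key_values` assembled in `generate_dummy_inputs` above are one `(key, value)` pair per layer, each of shape (batch, num_heads, past_sequence_length, hidden_size // num_heads). A small shape sketch with assumed GPT-J-like sizes (the concrete numbers here are illustrative, taken from the config defaults above):

import torch

batch, n_head, n_layer, n_embd, past_len = 2, 16, 28, 4096, 10
shape = (batch, n_head, past_len, n_embd // n_head)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
assert past_key_values[0][0].shape == (2, 16, 10, 256)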
349
0
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def snake_case_ ( *lowerCAmelCase_ : List[str] ): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : Optional[Any] = list(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): __lowercase : Dict = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def snake_case_ ( lowerCAmelCase_ : Exception ): __lowercase : int = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def snake_case_ ( lowerCAmelCase_ : callable = None , lowerCAmelCase_ : int = 128 ): if function is None: return functools.partial(lowerCAmelCase_ , starting_batch_size=lowerCAmelCase_ ) __lowercase : Dict = starting_batch_size def decorator(*lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() __lowercase : List[str] = list(inspect.signature(lowerCAmelCase_ ).parameters.keys() ) # Guard against user error if len(lowerCAmelCase_ ) < (len(lowerCAmelCase_ ) + 1): __lowercase : int = """, """.join([F"{arg}={value}" for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( F"Batch size was passed into `{function.__name__}` as the first argument when called." F"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) except Exception as e: if should_reduce_batch_size(lowerCAmelCase_ ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
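A sketch of how the decorator above is typically applied (the import path follows `accelerate.utils`; the loop body is a placeholder). The decorated function must take `batch_size` as its first parameter; the decorator injects it and halves it after every OOM-style failure:

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # run one training attempt at this batch size; raising a CUDA OOM
    # error here makes the decorator retry with batch_size // 2
    print(f"trying batch_size={batch_size}")


training_loop()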
306
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    # failure[j] is the length of the longest proper prefix of the pattern
    # that is also a suffix of pattern[: j + 1]
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
306
1
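For illustration, a minimal usage sketch of the batch-size-halving decorator defined above, assuming it is accelerate's `find_executable_batch_size` (which it mirrors); the `train` body is illustrative:

from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # pretend any batch size above 32 runs out of memory
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")
    return f"trained with batch_size={batch_size}"


# The decorator injects batch_size as the first argument, so the caller
# passes nothing: the call retries 128 -> 64 -> 32 and returns at 32.
print(train())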
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def _lowercase ( __lowerCAmelCase = True , *__lowerCAmelCase , **__lowerCAmelCase ) -> int: if not is_tqdm_available(): raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" ) SCREAMING_SNAKE_CASE__ : str = False if main_process_only: SCREAMING_SNAKE_CASE__ : Dict = PartialState().local_process_index == 0 return _tqdm(*__lowerCAmelCase , **__lowerCAmelCase , disable=__lowerCAmelCase )
132
"""simple docstring""" import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __a : '''simple docstring''' def __init__( self , _a , _a=3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : Dict = seq_length SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE__ : Optional[Any] = use_labels SCREAMING_SNAKE_CASE__ : Tuple = vocab_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers SCREAMING_SNAKE_CASE__ : int = num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE__ : int = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range SCREAMING_SNAKE_CASE__ : Dict = num_labels SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices SCREAMING_SNAKE_CASE__ : Optional[int] = scope def _a ( self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : int = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self ) -> str: """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , 
pad_token_id=1 , new_decoder_architecture=_a , ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = FalconModel(config=_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : str = model(_a , attention_mask=_a ) SCREAMING_SNAKE_CASE__ : Any = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : Dict = FalconModel(_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : int = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) SCREAMING_SNAKE_CASE__ : List[str] = model( _a , attention_mask=_a , encoder_hidden_states=_a , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = FalconForCausalLM(config=_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : Optional[int] = FalconForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass SCREAMING_SNAKE_CASE__ : int = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , ) SCREAMING_SNAKE_CASE__ : str = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Tuple = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0] SCREAMING_SNAKE_CASE__ : Tuple = model( _a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0] # select random slice SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) ) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Tuple = config_and_inputs 
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE :Any = (FalconForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE :List[Any] = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE :List[str] = False _SCREAMING_SNAKE_CASE :str = False def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = FalconModelTester(self ) SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=_a , hidden_size=37 ) def _a ( self ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: SCREAMING_SNAKE_CASE__ : str = alibi self.model_tester.create_and_check_model(_a , *_a ) def _a ( self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] = 3 SCREAMING_SNAKE_CASE__ : Optional[Any] = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ : str = input_ids.ne(1 ).to(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = FalconForSequenceClassification(_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3 SCREAMING_SNAKE_CASE__ : Optional[Any] = """single_label_classification""" SCREAMING_SNAKE_CASE__ : List[str] = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ : Tuple = input_ids.ne(1 ).to(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[Any] = FalconForSequenceClassification(_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : str = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[int] = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ : int = 
FalconForCausalLM(_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model(_a , use_cache=_a ) SCREAMING_SNAKE_CASE__ : str = input_ids.shape[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = model._convert_to_rw_cache(result.past_key_values ) SCREAMING_SNAKE_CASE__ : Any = model._convert_cache_to_standard_format(_a , _a ) for layer in range(len(_a ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] = 3 SCREAMING_SNAKE_CASE__ : str = """multi_label_classification""" SCREAMING_SNAKE_CASE__ : int = input_dict["""input_ids"""] SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(1 ).to(_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ : int = FalconForSequenceClassification(_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , attention_mask=_a , labels=_a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self ) -> List[str]: """simple docstring""" for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(_a , """use_cache""" ): return SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_a ).to(_a ) if "use_cache" not in inputs: SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Tuple = model(**_a ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return SCREAMING_SNAKE_CASE__ : Dict = ( getattr(_a , """decoder_layers""" , _a ) or getattr(_a , """num_decoder_layers""" , _a ) or config.num_hidden_layers ) SCREAMING_SNAKE_CASE__ : Any = getattr(_a , """num_kv_heads""" , config.num_attention_heads ) SCREAMING_SNAKE_CASE__ : str = getattr(_a , """d_model""" , config.hidden_size ) SCREAMING_SNAKE_CASE__ : str = embed_dim // num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = outputs["""past_key_values"""] self.assertEqual(len(_a ) , _a ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = inputs["""input_ids"""].shape for i in range(_a ): if config.new_decoder_architecture: SCREAMING_SNAKE_CASE__ : Any = config.num_attention_heads elif config.multi_query: SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __a (unittest.TestCase): '''simple docstring''' @slow def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) SCREAMING_SNAKE_CASE__ : Tuple = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) model.eval() model.to(_a ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( """My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.""" ) SCREAMING_SNAKE_CASE__ : Any = model.generate(**_a , do_sample=_a , max_new_tokens=19 ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(_a )[0] self.assertEqual(_a , _a ) @slow def _a ( self ) -> Union[str, Any]: """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = FalconForCausalLM.from_pretrained(_a ) model.eval() model.to(_a ) SCREAMING_SNAKE_CASE__ : str = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**_a , do_sample=_a , max_new_tokens=4 ) model.generate(**_a , do_sample=_a , max_new_tokens=4 ) model.generate(**_a , num_beams=2 , max_new_tokens=4 ) @slow def _a ( self ) -> Optional[int]: """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained(_a ) SCREAMING_SNAKE_CASE__ : int = FalconForCausalLM.from_pretrained(_a ) model.eval() model.to(device=_a ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(_a ) # Test results are the same with and without cache SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a ) SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**_a , do_sample=_a , max_new_tokens=20 , use_cache=_a ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
132
1
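For illustration, a hypothetical call of the main-process-only tqdm wrapper above; the `accelerate.utils` import path and the loop body are assumptions, and with this signature the first positional argument selects main-process-only behaviour:

from accelerate.utils import tqdm

# Only the local main process (rank 0) renders the bar; every other rank
# forwards disable=True to tqdm.auto.tqdm.
for _ in tqdm(True, range(1_000), desc="steps"):
    pass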
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class UpperCAmelCase_ :
    def __init__( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str]=1_3 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[Any]=9_9 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=5_1_2 , UpperCAmelCase__ : Optional[int]=1_6 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Optional[Any]=None , ) -> List[Any]:
        lowerCAmelCase = parent
        lowerCAmelCase = 1_3
        lowerCAmelCase = 7
        lowerCAmelCase = True
        lowerCAmelCase = True
        lowerCAmelCase = True
        lowerCAmelCase = True
        lowerCAmelCase = 9_9
        lowerCAmelCase = 3_2
        lowerCAmelCase = 2
        lowerCAmelCase = 4
        lowerCAmelCase = 3_7
        lowerCAmelCase = 'gelu'
        lowerCAmelCase = 0.1
        lowerCAmelCase = 0.1
        lowerCAmelCase = 5_1_2
        lowerCAmelCase = 1_6
        lowerCAmelCase = 2
        lowerCAmelCase = 0.02
        lowerCAmelCase = 3
        lowerCAmelCase = 4
        lowerCAmelCase = None

    def __UpperCAmelCase ( self : Any ) -> List[str]:
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowerCAmelCase = None
        if self.use_input_mask:
            lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )

        lowerCAmelCase = None
        if self.use_token_type_ids:
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )

        lowerCAmelCase = RoFormerConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
            return_dict=UpperCAmelCase__ ,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> List[Any]:
        lowerCAmelCase = TFRoFormerModel(config=UpperCAmelCase__ )
        lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        lowerCAmelCase = [input_ids, input_mask]
        lowerCAmelCase = model(UpperCAmelCase__ )
        lowerCAmelCase = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ) -> Tuple:
        lowerCAmelCase = True
        lowerCAmelCase = TFRoFormerForCausalLM(config=UpperCAmelCase__ )
        lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        lowerCAmelCase = model(UpperCAmelCase__ )['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) -> Optional[Any]:
        lowerCAmelCase = TFRoFormerForMaskedLM(config=UpperCAmelCase__ )
        lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        lowerCAmelCase = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = TFRoFormerForSequenceClassification(config=UpperCAmelCase__ )
        lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        lowerCAmelCase = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] ) -> str:
        lowerCAmelCase = self.num_choices
        lowerCAmelCase = TFRoFormerForMultipleChoice(config=UpperCAmelCase__ )
        lowerCAmelCase = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        lowerCAmelCase = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) -> str:
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = TFRoFormerForTokenClassification(config=UpperCAmelCase__ )
        lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        lowerCAmelCase = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
        lowerCAmelCase = TFRoFormerForQuestionAnswering(config=UpperCAmelCase__ )
        lowerCAmelCase = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        lowerCAmelCase = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCAmelCase ( self : int ) -> Dict:
        lowerCAmelCase = self.prepare_config_and_inputs()
        ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) = config_and_inputs
        lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
    lowerCamelCase : Optional[int] = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    lowerCamelCase : Tuple = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCamelCase : str = False
    lowerCamelCase : List[Any] = False

    def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ) -> str:
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def __UpperCAmelCase ( self : Optional[Any] ) -> int:
        lowerCAmelCase = TFRoFormerModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self : str ) -> List[str]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def __UpperCAmelCase ( self : List[str] ) -> Any:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )

    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*UpperCAmelCase__ )

    def __UpperCAmelCase ( self : int ) -> List[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )

    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )

    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )

    @slow
    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        lowerCAmelCase = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(UpperCAmelCase__ )


@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        lowerCAmelCase = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCAmelCase = model(UpperCAmelCase__ )[0]

        # TODO Replace vocab size
        lowerCAmelCase = 5_0_0_0_0

        lowerCAmelCase = [1, 6, vocab_size]
        self.assertEqual(output.shape , UpperCAmelCase__ )

        print(output[:, :3, :3] )

        # TODO Replace values below with what was printed above.
        lowerCAmelCase = tf.constant(
            [
                [
                    [-0.12_053_341, -1.0_264_901, 0.29_221_946],
                    [-1.5_133_783, 0.197_433, 0.15_190_607],
                    [-5.0_135_403, -3.900_256, -0.84_038_764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 )


@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    lowerCamelCase : List[str] = 1E-4

    def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        lowerCAmelCase = tf.constant([[4, 1_0]] )
        lowerCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )

        lowerCAmelCase = emba(input_ids.shape )
        lowerCAmelCase = tf.constant(
            [[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )

        tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , atol=self.tolerance )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        lowerCAmelCase = tf.constant(
            [
                [0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
                [0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
                [0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
            ] )
        lowerCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        lowerCAmelCase = emba.weight[:3, :5]

        tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , atol=self.tolerance )


@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    lowerCamelCase : str = 1E-4

    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        # 2,12,16,64
        lowerCAmelCase = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        lowerCAmelCase = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0

        lowerCAmelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        lowerCAmelCase = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]

        lowerCAmelCase , lowerCAmelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

        lowerCAmelCase = tf.constant(
            [
                [0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
                [-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
                [-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
                [-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
                [0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
                [3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
            ] )
        lowerCAmelCase = tf.constant(
            [
                [0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
                [0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
                [1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
                [2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
                [-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
                [-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
            ] )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase__ , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase__ , atol=self.tolerance )
55
'''simple docstring'''
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    def __UpperCAmelCase ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
        lowerCAmelCase = jnp.ones((batch_size, length) ) / length
        return scores

    def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        lowerCAmelCase = None
        lowerCAmelCase = 2_0
        lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__ )

        # tweak scores to not be uniform anymore
        lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        lowerCAmelCase = scores.at[1, 1_0].set((1 / length) - 0.4 )  # valley, 1st batch

        # compute softmax
        lowerCAmelCase = jax.nn.softmax(UpperCAmelCase__ , axis=-1 )

        lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )

        lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
        lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        lowerCAmelCase = None
        lowerCAmelCase = 1_0
        lowerCAmelCase = 2

        # create ramp distribution
        lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
        lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size

        lowerCAmelCase = FlaxTopKLogitsWarper(3 )

        lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )

        # check special case
        lowerCAmelCase = 5
        lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )

        lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, length) ).copy()
        lowerCAmelCase = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )

    def __UpperCAmelCase ( self : Any ) -> str:
        lowerCAmelCase = None
        lowerCAmelCase = 1_0
        lowerCAmelCase = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )

        lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )

        lowerCAmelCase = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) )

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )

        # check edge cases with negative and extreme logits
        lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        lowerCAmelCase = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )

    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        lowerCAmelCase = 2_0
        lowerCAmelCase = 4
        lowerCAmelCase = 0
        lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ )

        # check that min length is applied at length 5
        lowerCAmelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
        lowerCAmelCase = 5
        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )

        # check that min length is not applied anymore at length 15
        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = 1_5
        lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )

    def __UpperCAmelCase ( self : Dict ) -> str:
        lowerCAmelCase = 2_0
        lowerCAmelCase = 4
        lowerCAmelCase = 0

        lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )

        # check that all scores are -inf except the bos_token_id score
        lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
        lowerCAmelCase = 1
        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        lowerCAmelCase = 3
        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        lowerCAmelCase = 2_0
        lowerCAmelCase = 4
        lowerCAmelCase = 0
        lowerCAmelCase = 5
        lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )

        # check that all scores are -inf except the eos_token_id when max_length is reached
        lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
        lowerCAmelCase = 4
        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        lowerCAmelCase = 3
        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        lowerCAmelCase = 4
        lowerCAmelCase = 1_0
        lowerCAmelCase = 1_5
        lowerCAmelCase = 2
        lowerCAmelCase = 1
        lowerCAmelCase = 1_5

        # dummy input_ids and scores
        lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
        lowerCAmelCase = input_ids.copy()

        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = scores.copy()

        # instantiate all dist processors
        lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCAmelCase = FlaxTopKLogitsWarper(3 )
        lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )

        # instantiate all logits processors
        lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ )
        lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
        lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )

        lowerCAmelCase = 1_0

        # no processor list
        lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )

        # with processor list
        lowerCAmelCase = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )

        # scores should be equal
        self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )

    def __UpperCAmelCase ( self : int ) -> List[str]:
        lowerCAmelCase = 4
        lowerCAmelCase = 1_0
        lowerCAmelCase = 1_5
        lowerCAmelCase = 2
        lowerCAmelCase = 1
        lowerCAmelCase = 1_5

        # dummy input_ids and scores
        lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
        lowerCAmelCase = input_ids.copy()

        lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = scores.copy()

        # instantiate all dist processors
        lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
        lowerCAmelCase = FlaxTopKLogitsWarper(3 )
        lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )

        # instantiate all logits processors
        lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCAmelCase__ )
        lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
        lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )

        lowerCAmelCase = 1_0

        # no processor list
        def run_no_processor_list(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ):
            lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            return scores

        # with processor list
        def run_processor_list(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ):
            lowerCAmelCase = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            return scores

        lowerCAmelCase = jax.jit(UpperCAmelCase__ )
        lowerCAmelCase = jax.jit(UpperCAmelCase__ )

        lowerCAmelCase = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

        # scores should be equal
        self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
55
1
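For illustration, a small sketch of the processor-chaining pattern the tests above exercise: Flax warpers and processors share the `(input_ids, scores, cur_len)` call signature, so a `FlaxLogitsProcessorList` applies them in order. The shapes and values below are arbitrary examples, not taken from the row above:

import jax.numpy as jnp
from transformers.generation import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
scores = jnp.ones((2, 10)) / 10  # uniform scores over a 10-token vocab

# Sharpen with temperature, then keep only the top-3 tokens per row.
processor = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(3)])
warped = processor(input_ids, scores, cur_len=4)  # entries outside the top-3 become -inf
print(warped.shape)  # (2, 10)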
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __magic_name__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        A_ : str = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        A_ : Dict = AutoTokenizer.from_pretrained("google/mt5-small" )

        A_ : int = tokenizer("Hello there" , return_tensors="np" ).input_ids
        A_ : Optional[int] = tokenizer("Hi I am" , return_tensors="np" ).input_ids

        A_ : Optional[Any] = shift_tokens_right(snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )

        A_ : str = model(snake_case , decoder_input_ids=snake_case ).logits
        A_ : Optional[int] = optax.softmax_cross_entropy(snake_case , onehot(snake_case , logits.shape[-1] ) ).mean()

        A_ : Dict = -(labels.shape[-1] * loss.item())

        A_ : Any = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
300
from __future__ import annotations

import unittest

import numpy as np

from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )


class __magic_name__ :
    """simple docstring"""

    def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ):
        '''simple docstring'''
        A_ : str = parent
        A_ : str = batch_size
        A_ : str = seq_length
        A_ : Any = is_training
        A_ : Any = use_input_mask
        A_ : str = use_token_type_ids
        A_ : Tuple = use_labels
        A_ : Optional[Any] = vocab_size
        A_ : Dict = hidden_size
        A_ : str = num_hidden_layers
        A_ : Dict = num_attention_heads
        A_ : str = intermediate_size
        A_ : int = hidden_act
        A_ : List[Any] = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : Optional[Any] = max_position_embeddings
        A_ : List[Any] = type_vocab_size
        A_ : Any = type_sequence_label_size
        A_ : Dict = initializer_range
        A_ : Any = num_labels
        A_ : Optional[int] = num_choices
        A_ : Optional[Any] = scope
        A_ : Any = range_bbox

    def SCREAMING_SNAKE_CASE ( self :str ):
        '''simple docstring'''
        A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        # convert bbox to numpy since TF does not support item assignment
        A_ : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    A_ : str = bbox[i, j, 3]
                    A_ : Union[str, Any] = bbox[i, j, 1]
                    A_ : List[Any] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    A_ : Any = bbox[i, j, 2]
                    A_ : Tuple = bbox[i, j, 0]
                    A_ : int = t
        A_ : int = tf.convert_to_tensor(snake_case )

        A_ : Any = None
        if self.use_input_mask:
            A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )

        A_ : str = None
        if self.use_token_type_ids:
            A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        A_ : Dict = None
        A_ : List[Any] = None
        A_ : List[str] = None
        if self.use_labels:
            A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A_ : str = ids_tensor([self.batch_size] , self.num_choices )

        A_ : int = LayoutLMConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE ( self :str , snake_case :Dict , snake_case :Union[str, Any] , snake_case :int , snake_case :int , snake_case :Union[str, Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :List[Any] ):
        '''simple docstring'''
        A_ : Any = TFLayoutLMModel(config=snake_case )

        A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        A_ : str = model(snake_case , snake_case , token_type_ids=snake_case )
        A_ : List[Any] = model(snake_case , snake_case )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Any , snake_case :List[Any] , snake_case :List[str] , snake_case :Optional[Any] , snake_case :Dict , snake_case :Any , snake_case :Union[str, Any] , snake_case :List[Any] ):
        '''simple docstring'''
        A_ : Optional[int] = TFLayoutLMForMaskedLM(config=snake_case )

        A_ : Tuple = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Dict , snake_case :Tuple , snake_case :Tuple , snake_case :List[str] , snake_case :Tuple , snake_case :str , snake_case :Optional[int] , snake_case :Any ):
        '''simple docstring'''
        A_ : Union[str, Any] = self.num_labels
        A_ : int = TFLayoutLMForSequenceClassification(config=snake_case )

        A_ : Optional[int] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Dict , snake_case :str , snake_case :Optional[Any] , snake_case :int , snake_case :Any , snake_case :Tuple , snake_case :List[str] , snake_case :Union[str, Any] ):
        '''simple docstring'''
        A_ : List[Any] = self.num_labels
        A_ : str = TFLayoutLMForTokenClassification(config=snake_case )

        A_ : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self :int , snake_case :List[str] , snake_case :Optional[int] , snake_case :Union[str, Any] , snake_case :List[Any] , snake_case :int , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ):
        '''simple docstring'''
        A_ : Optional[Any] = TFLayoutLMForQuestionAnswering(config=snake_case )

        A_ : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        A_ : int = self.prepare_config_and_inputs()
        ( A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) : Union[str, Any] = config_and_inputs
        A_ : Optional[Any] = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """simple docstring"""

    __UpperCamelCase = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    __UpperCamelCase = (
        {
            '''feature-extraction''': TFLayoutLMModel,
            '''fill-mask''': TFLayoutLMForMaskedLM,
            '''text-classification''': TFLayoutLMForSequenceClassification,
            '''token-classification''': TFLayoutLMForTokenClassification,
            '''zero-shot''': TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __UpperCamelCase = False
    __UpperCamelCase = True
    __UpperCamelCase = 10

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        A_ : Tuple = TFLayoutLMModelTester(self )
        A_ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''simple docstring'''
        A_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
        '''simple docstring'''
        A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Any ):
        '''simple docstring'''
        A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''simple docstring'''
        A_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        A_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case )

    @slow
    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        '''simple docstring'''
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : List[str] = TFLayoutLMModel.from_pretrained(model_name )
            self.assertIsNotNone(snake_case )

    @unittest.skip("Onnx compliancy broke with TF 2.10" )
    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        pass


def __snake_case ( ) -> Optional[Any]:
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    A_ : int = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    A_ : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    A_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    A_ : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    A_ : Tuple = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class __magic_name__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''simple docstring'''
        A_ : str = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        A_ , A_ , A_ , A_ , A_ : Tuple = prepare_layoutlm_batch_inputs()

        # forward pass
        A_ : Tuple = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )

        # test the sequence output on [0, :3, :3]
        A_ : List[Any] = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-3 ) )

        # test the pooled output on [1, :3]
        A_ : Optional[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1e-3 ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        A_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
        A_ , A_ , A_ , A_ , A_ : Any = prepare_layoutlm_batch_inputs()

        # forward pass
        A_ : Dict = model(
            input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1] ) , )

        # test whether we get a loss as a scalar
        A_ : List[str] = outputs.loss
        A_ : Union[str, Any] = (2,)
        self.assertEqual(loss.shape , snake_case )

        # test the shape of the logits
        A_ : Tuple = outputs.logits
        A_ : Tuple = (2, 2)
        self.assertEqual(logits.shape , snake_case )

    @slow
    def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
        '''simple docstring'''
        A_ : int = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
        A_ , A_ , A_ , A_ , A_ : Optional[int] = prepare_layoutlm_batch_inputs()

        # forward pass
        A_ : Union[str, Any] = model(
            input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )

        # test the shape of the logits
        A_ : Dict = outputs.logits
        A_ : List[Any] = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , snake_case )

    @slow
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        A_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        A_ , A_ , A_ , A_ , A_ : str = prepare_layoutlm_batch_inputs()

        # forward pass
        A_ : Union[str, Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case )

        # test the shape of the logits
        A_ : Union[str, Any] = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , snake_case )
        self.assertEqual(outputs.end_logits.shape , snake_case )
300
1
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of ``number``.

    If ``digit_amount`` is positive, the result is rounded to that many digits;
    otherwise the raw fractional part is returned.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
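# Behavioural note (added for illustration): the fractional part comes from
# plain float subtraction, so digit_amount > 0 exists to round away binary
# floating-point noise, e.g.:
#
#     >>> decimal_isolate(35.345, 3)
#     0.345
#     >>> abs(decimal_isolate(35.345, 0) - 0.345) < 1e-9
#     True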
351
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" lowerCamelCase = DebertaTokenizer lowerCamelCase = True lowerCamelCase = DebertaTokenizerFast def lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case : int = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] snake_case : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) snake_case : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case : List[Any] = {'''unk_token''': '''[UNK]'''} snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase__ ) ) def lowerCAmelCase ( self : Union[str, Any] , **UpperCamelCase__ : Any ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def lowerCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ) -> Optional[int]: """simple docstring""" snake_case : Tuple = '''lower newer''' snake_case : Optional[Any] = '''lower newer''' return input_text, output_text def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case : Dict = self.get_tokenizer() snake_case : Optional[Any] = '''lower newer''' snake_case : Tuple = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] snake_case : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Union[str, Any] = tokens + [tokenizer.unk_token] snake_case : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ ) def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" snake_case : int = self.get_tokenizer() snake_case : Optional[int] = tokenizer('''Hello''' , '''World''' ) snake_case : Optional[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , UpperCamelCase__ ) @slow def lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case : Optional[int] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) snake_case : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ ) snake_case : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ ) snake_case : Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ ) snake_case : 
Optional[int] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ ) snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ ) snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" snake_case : Dict = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: snake_case : Any = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) snake_case : Optional[Any] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] snake_case : Optional[Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ ) snake_case : List[str] = [tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) for seq in encoding['''input_ids''']] # fmt: off snake_case : Optional[int] = { '''input_ids''': [ [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on snake_case : Any = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , UpperCamelCase__ ) for expected, decoded in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
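# Illustrative sketch (added; not part of the original test file): the same
# round-trip that the slow test above asserts, written against the public
# DebertaTokenizer API with the checkpoint the tests already use.

from transformers import DebertaTokenizer


def _demo_deberta_round_trip() -> None:
    tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
    ids = tokenizer.encode("sequence builders", add_special_tokens=False)
    with_special = tokenizer.build_inputs_with_special_tokens(ids)
    # build_inputs_with_special_tokens should agree with add_special_tokens=True
    assert with_special == tokenizer.encode("sequence builders", add_special_tokens=True)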
83
0
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : Any = logging.get_logger(__name__) def a__ ( a__ , a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = original_name.split(""".""" )[0] __SCREAMING_SNAKE_CASE = key.split(""".""" ) __SCREAMING_SNAKE_CASE = int(key_list[key_list.index(a__ ) - 2] ) __SCREAMING_SNAKE_CASE = int(key_list[key_list.index(a__ ) - 1] ) __SCREAMING_SNAKE_CASE = orig_block_num - offset __SCREAMING_SNAKE_CASE = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' ) return key def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = OrderedDict() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0 for key, value in state_dict.items(): if key.startswith("""network""" ): __SCREAMING_SNAKE_CASE = key.replace("""network""" , """poolformer.encoder""" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("""bias""" ) and "patch_embed" not in key: patch_emb_offset += 1 __SCREAMING_SNAKE_CASE = key[: key.find("""proj""" )] __SCREAMING_SNAKE_CASE = key.replace(a__ , F'patch_embeddings.{total_embed_found}.' ) __SCREAMING_SNAKE_CASE = key.replace("""proj""" , """projection""" ) if key.endswith("""bias""" ): total_embed_found += 1 if "patch_embeddings" in key: __SCREAMING_SNAKE_CASE = """poolformer.encoder.""" + key if "mlp.fc1" in key: __SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """mlp.fc1""" , """output.conv1""" ) if "mlp.fc2" in key: __SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """mlp.fc2""" , """output.conv2""" ) if "norm1" in key: __SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """norm1""" , """before_norm""" ) if "norm2" in key: __SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """norm2""" , """after_norm""" ) if "layer_scale_1" in key: __SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """layer_scale_1""" , """layer_scale_1""" ) if "layer_scale_2" in key: __SCREAMING_SNAKE_CASE = replace_key_with_offset(a__ , a__ , """layer_scale_2""" , """layer_scale_2""" ) if "head" in key: __SCREAMING_SNAKE_CASE = key.replace("""head""" , """classifier""" ) __SCREAMING_SNAKE_CASE = value return new_state_dict def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" __SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ) return image @torch.no_grad() def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = PoolFormerConfig() # set attributes based on model_name __SCREAMING_SNAKE_CASE = """huggingface/label-files""" __SCREAMING_SNAKE_CASE = model_name[-3:] __SCREAMING_SNAKE_CASE = 10_00 __SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" __SCREAMING_SNAKE_CASE = (1, 10_00) # set config attributes __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type="""dataset""" ) , """r""" ) ) __SCREAMING_SNAKE_CASE = {int(a__ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} if size == "s12": __SCREAMING_SNAKE_CASE = [2, 2, 6, 2] __SCREAMING_SNAKE_CASE = 
[64, 1_28, 3_20, 5_12] __SCREAMING_SNAKE_CASE = 4.0 __SCREAMING_SNAKE_CASE = 0.9 elif size == "s24": __SCREAMING_SNAKE_CASE = [4, 4, 12, 4] __SCREAMING_SNAKE_CASE = [64, 1_28, 3_20, 5_12] __SCREAMING_SNAKE_CASE = 4.0 __SCREAMING_SNAKE_CASE = 0.9 elif size == "s36": __SCREAMING_SNAKE_CASE = [6, 6, 18, 6] __SCREAMING_SNAKE_CASE = [64, 1_28, 3_20, 5_12] __SCREAMING_SNAKE_CASE = 4.0 __SCREAMING_SNAKE_CASE = 1E-6 __SCREAMING_SNAKE_CASE = 0.9 elif size == "m36": __SCREAMING_SNAKE_CASE = [6, 6, 18, 6] __SCREAMING_SNAKE_CASE = [96, 1_92, 3_84, 7_68] __SCREAMING_SNAKE_CASE = 4.0 __SCREAMING_SNAKE_CASE = 1E-6 __SCREAMING_SNAKE_CASE = 0.95 elif size == "m48": __SCREAMING_SNAKE_CASE = [8, 8, 24, 8] __SCREAMING_SNAKE_CASE = [96, 1_92, 3_84, 7_68] __SCREAMING_SNAKE_CASE = 4.0 __SCREAMING_SNAKE_CASE = 1E-6 __SCREAMING_SNAKE_CASE = 0.95 else: raise ValueError(F'Size {size} not supported' ) # load image processor __SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=a__ ) # Prepare image __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=a__ , return_tensors="""pt""" ).pixel_values logger.info(F'Converting model {model_name}...' ) # load original state dict __SCREAMING_SNAKE_CASE = torch.load(a__ , map_location=torch.device("""cpu""" ) ) # rename keys __SCREAMING_SNAKE_CASE = rename_keys(a__ ) # create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE = PoolFormerForImageClassification(a__ ) model.load_state_dict(a__ ) model.eval() # Define image processor __SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=a__ ) __SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values # forward pass __SCREAMING_SNAKE_CASE = model(a__ ) __SCREAMING_SNAKE_CASE = outputs.logits # define expected logit slices for different models if size == "s12": __SCREAMING_SNAKE_CASE = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": __SCREAMING_SNAKE_CASE = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": __SCREAMING_SNAKE_CASE = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": __SCREAMING_SNAKE_CASE = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": __SCREAMING_SNAKE_CASE = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(F'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a__ , atol=1E-2 ) # finally, save model and image processor logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) UpperCAmelCase : Dict = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
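# Hypothetical invocation sketch (added for illustration; the checkpoint and
# output paths below are placeholders, not values from the original script):
#
#     convert_poolformer_checkpoint(
#         "poolformer_s12",                  # model_name
#         "/path/to/poolformer_s12.pth.tar", # placeholder checkpoint path
#         "/path/to/poolformer_s12_hf",      # placeholder output folder
#     )
#
# which is equivalent to running the argparse entry point above with the
# corresponding --model_name/--checkpoint_path/--pytorch_dump_folder_path flags.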
267
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class __a : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride _UpperCAmelCase = out_features _UpperCAmelCase = out_indices def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ['stem'] _UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : int = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {} _a : Optional[int] = False _a : List[str] = False _a : List[str] = False _a : Optional[int] = False _a : Tuple = False def UpperCAmelCase__ ( self ) -> int: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" return def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" pass @unittest.skip('Swin does not support feedforward 
chunking' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swin has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if 
isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCAmelCase__ ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase__ ( self ) -> Optional[int]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = 0 return t def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ): with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple() def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. 
Dict has''' f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.''' ) , ) recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} ) @require_torch class __a ( unittest.TestCase , UpperCAmelCase ): _a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else () _a : Any = MaskFormerSwinConfig def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = MaskFormerSwinModelTester(self ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: _UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE ) backbone.to(_SCREAMING_SNAKE_CASE ) backbone.eval() _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: _UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(outputs.attentions )
329
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
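# Minimal usage sketch (added for illustration, not part of the original file):
# the defaults above derive hidden_size from embed_dim and the number of stages.

if __name__ == "__main__":
    config = NatConfig()
    # hidden_size = embed_dim * 2 ** (len(depths) - 1) = 64 * 2 ** 3
    assert config.hidden_size == 512
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]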
228
from __future__ import annotations


class IIRFilter:
    """N-order infinite impulse response (IIR) filter in direct form I."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories by one and store the newest sample and output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
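# Usage sketch (added for illustration): a first-order averaging filter
# y[n] = 0.5 * x[n] + 0.5 * x[n - 1], i.e. a_coeffs = [1.0, 0.0] and
# b_coeffs = [0.5, 0.5]. A constant input settles at the constant.

if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    outputs = [filt.process(1.0) for _ in range(4)]
    # first output only sees half the sample (no history yet), then it settles
    assert outputs == [0.5, 1.0, 1.0, 1.0]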
228
1
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCAmelCase : def __init__( self: str , UpperCAmelCase_: Any , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: int=3 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: str=True , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=99 , UpperCAmelCase_: Optional[Any]=36 , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Dict=4 , UpperCAmelCase_: List[str]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: str=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Optional[Any]=6 , UpperCAmelCase_: Any=6 , UpperCAmelCase_: str=3 , UpperCAmelCase_: Optional[int]=4 , UpperCAmelCase_: str=None , UpperCAmelCase_: Any=1_000 , ): '''simple docstring''' _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = text_seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = coordinate_size _SCREAMING_SNAKE_CASE = shape_size _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope _SCREAMING_SNAKE_CASE = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _SCREAMING_SNAKE_CASE = text_seq_length _SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1 _SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 
self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _SCREAMING_SNAKE_CASE = bbox[i, j, 3] _SCREAMING_SNAKE_CASE = bbox[i, j, 1] _SCREAMING_SNAKE_CASE = t if bbox[i, j, 2] < bbox[i, j, 0]: _SCREAMING_SNAKE_CASE = bbox[i, j, 2] _SCREAMING_SNAKE_CASE = bbox[i, j, 0] _SCREAMING_SNAKE_CASE = t _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase ( self: Any , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: str , UpperCAmelCase_: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE = LayoutLMvaModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() # text + image _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _SCREAMING_SNAKE_CASE = model(pixel_values=UpperCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCamelCase ( self: Dict , UpperCAmelCase_: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Dict , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Dict , UpperCAmelCase_: Optional[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = 
LayoutLMvaForSequenceClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _SCREAMING_SNAKE_CASE = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = LayoutLMvaForTokenClassification(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _SCREAMING_SNAKE_CASE = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: int , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: str ): '''simple docstring''' _SCREAMING_SNAKE_CASE = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() _SCREAMING_SNAKE_CASE = model( UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): __snake_case : int = False __snake_case : Union[str, Any] = False __snake_case : Tuple = False __snake_case : Tuple = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) __snake_case : List[str] = ( {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel} if is_torch_available() else {} ) def UpperCamelCase ( self: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Any ): '''simple docstring''' return True def UpperCamelCase ( self: str ): '''simple docstring''' _SCREAMING_SNAKE_CASE = LayoutLMvaModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Any=False ): 
'''simple docstring''' _SCREAMING_SNAKE_CASE = copy.deepcopy(UpperCAmelCase_ ) if model_class in get_values(UpperCAmelCase_ ): _SCREAMING_SNAKE_CASE = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCAmelCase_ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase_ ): _SCREAMING_SNAKE_CASE = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) elif model_class in get_values(UpperCAmelCase_ ): _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) elif model_class in [ *get_values(UpperCAmelCase_ ), ]: _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) elif model_class in [ *get_values(UpperCAmelCase_ ), ]: _SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , ) return inputs_dict def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def UpperCamelCase ( self: Tuple ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ ) def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) def UpperCamelCase ( self: Optional[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) @slow def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class __UpperCAmelCase (unittest.TestCase ): @cached_property def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) if is_vision_available() else None @slow def UpperCamelCase ( self: List[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = self.default_image_processor _SCREAMING_SNAKE_CASE = prepare_img() _SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values.to(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] ) _SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 
4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass _SCREAMING_SNAKE_CASE = model( input_ids=input_ids.to(UpperCAmelCase_ ) , bbox=bbox.to(UpperCAmelCase_ ) , pixel_values=pixel_values.to(UpperCAmelCase_ ) , ) # verify the logits _SCREAMING_SNAKE_CASE = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
306
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at ``x0`` using Neville's method.

    Returns the interpolated value and the full tableau of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
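# Usage sketch (added for illustration): the points below lie on y = x + 5, so
# evaluating the interpolating polynomial at x = 5 recovers 10.0 exactly; the
# second element of the result is the full Neville tableau.

if __name__ == "__main__":
    value, _tableau = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert value == 10.0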
306
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict: """simple docstring""" __lowerCAmelCase : Optional[int] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base") __lowerCAmelCase : List[str] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __lowerCAmelCase : Dict = model(snake_case__)["last_hidden_state"] __lowerCAmelCase : int = tf.TensorShape((1, 10, 768)) self.assertEqual(output.shape , snake_case__) # compare the actual values for a slice. __lowerCAmelCase : Dict = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
360
"""simple docstring""" from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract __snake_case : Tuple = logging.get_logger(__name__) def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Any: return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def _lowercase ( __snake_case ,__snake_case ,__snake_case = None ) -> Tuple: __lowerCAmelCase : Tuple = tesseract_config if tesseract_config is not None else "" # apply OCR __lowerCAmelCase : List[str] = to_pil_image(__snake_case ) __lowerCAmelCase , __lowerCAmelCase : Optional[int] = pil_image.size __lowerCAmelCase : str = pytesseract.image_to_data(__snake_case ,lang=__snake_case ,output_type="dict" ,config=__snake_case ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates __lowerCAmelCase : List[str] = [idx for idx, word in enumerate(__snake_case ) if not word.strip()] __lowerCAmelCase : Any = [word for idx, word in enumerate(__snake_case ) if idx not in irrelevant_indices] __lowerCAmelCase : Any = [coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices] __lowerCAmelCase : List[Any] = [coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices] __lowerCAmelCase : List[Any] = [coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices] __lowerCAmelCase : List[str] = [coord for idx, coord in enumerate(__snake_case ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase : List[Any] = [] for x, y, w, h in zip(__snake_case ,__snake_case ,__snake_case ,__snake_case ): __lowerCAmelCase : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(__snake_case ) # finally, normalize the bounding boxes __lowerCAmelCase : Optional[Any] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__snake_case ,__snake_case ,__snake_case ) ) assert len(__snake_case ) == len(__snake_case ), "Not as many words as there are bounding boxes" return words, normalized_boxes class A__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' SCREAMING_SNAKE_CASE = ['pixel_values'] def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: PILImageResampling = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: Optional[str] = "" , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> None: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Optional[int] = size if size is not None else {"height": 224, "width": 224} __lowerCAmelCase : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Dict = do_resize __lowerCAmelCase : Optional[int] = size 
__lowerCAmelCase : Union[str, Any] = resample __lowerCAmelCase : Dict = apply_ocr __lowerCAmelCase : Dict = ocr_lang __lowerCAmelCase : List[str] = tesseract_config def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Dict[str, int] , _SCREAMING_SNAKE_CASE: PILImageResampling = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: Any , ) -> np.ndarray: """simple docstring""" __lowerCAmelCase : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""") __lowerCAmelCase : Dict = (size["height"], size["width"]) return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: PILImageResampling = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: List[str] , ) -> PIL.Image.Image: """simple docstring""" __lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase : Optional[int] = size if size is not None else self.size __lowerCAmelCase : int = get_size_dict(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : str = resample if resample is not None else self.resample __lowerCAmelCase : Any = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase : Tuple = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase : str = make_list_of_images(_SCREAMING_SNAKE_CASE) if not valid_images(_SCREAMING_SNAKE_CASE): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") # All transformations expect numpy arrays. 
__lowerCAmelCase : List[str] = [to_numpy_array(_SCREAMING_SNAKE_CASE) for image in images] if apply_ocr: requires_backends(self , "pytesseract") __lowerCAmelCase : Tuple = [] __lowerCAmelCase : Optional[int] = [] for image in images: __lowerCAmelCase , __lowerCAmelCase : Any = apply_tesseract(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) words_batch.append(_SCREAMING_SNAKE_CASE) boxes_batch.append(_SCREAMING_SNAKE_CASE) if do_resize: __lowerCAmelCase : Optional[int] = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __lowerCAmelCase : List[str] = [flip_channel_order(_SCREAMING_SNAKE_CASE) for image in images] __lowerCAmelCase : str = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) for image in images] __lowerCAmelCase : int = BatchFeature(data={"pixel_values": images} , tensor_type=_SCREAMING_SNAKE_CASE) if apply_ocr: __lowerCAmelCase : Optional[int] = words_batch __lowerCAmelCase : Optional[int] = boxes_batch return data
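# Quick sanity check of the 0-1000 box normalization used by the OCR helper
# above; `demo_normalize_box` and the 640x480 page size are illustrative
# stand-ins, not part of the processor itself.
def demo_normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(demo_normalize_box([64, 48, 320, 240], 640, 480))  # [100, 100, 500, 500]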
58
0
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""") # TF training parameters a_ : Optional[Any] = False a_ : int = False def __snake_case ( UpperCAmelCase_ : Namespace ): return TrainCommand(UpperCAmelCase_ ) class snake_case ( lowercase ): """simple docstring""" @staticmethod def snake_case ( UpperCamelCase ): """simple docstring""" lowerCamelCase_ = parser.add_parser("train" , help="CLI tool to train a model on a task." ) train_parser.add_argument( "--train_data" , type=UpperCamelCase , required=UpperCamelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , ) train_parser.add_argument( "--column_label" , type=UpperCamelCase , default=0 , help="Column of the dataset csv file with example labels." ) train_parser.add_argument( "--column_text" , type=UpperCamelCase , default=1 , help="Column of the dataset csv file with example texts." ) train_parser.add_argument( "--column_id" , type=UpperCamelCase , default=2 , help="Column of the dataset csv file with example ids." ) train_parser.add_argument( "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." ) train_parser.add_argument("--validation_data" , type=UpperCamelCase , default="" , help="path to validation dataset." ) train_parser.add_argument( "--validation_split" , type=UpperCamelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , ) train_parser.add_argument("--output" , type=UpperCamelCase , default="./" , help="path to saved the trained model." ) train_parser.add_argument( "--task" , type=UpperCamelCase , default="text_classification" , help="Task to train the model on." ) train_parser.add_argument( "--model" , type=UpperCamelCase , default="bert-base-uncased" , help="Model's name or path to stored model." ) train_parser.add_argument("--train_batch_size" , type=UpperCamelCase , default=32 , help="Batch size for training." ) train_parser.add_argument("--valid_batch_size" , type=UpperCamelCase , default=64 , help="Batch size for validation." ) train_parser.add_argument("--learning_rate" , type=UpperCamelCase , default=3e-5 , help="Learning rate." ) train_parser.add_argument("--adam_epsilon" , type=UpperCamelCase , default=1e-08 , help="Epsilon for Adam optimizer." 
) train_parser.set_defaults(func=UpperCamelCase ) def __init__( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = logging.get_logger("transformers-cli/training" ) lowerCamelCase_ = "tf" if is_tf_available() else "torch" os.makedirs(args.output , exist_ok=UpperCamelCase ) lowerCamelCase_ = args.output lowerCamelCase_ = args.column_label lowerCamelCase_ = args.column_text lowerCamelCase_ = args.column_id self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' ) if args.task == "text_classification": lowerCamelCase_ = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f'''Loading dataset from {args.train_data}''' ) lowerCamelCase_ = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowerCamelCase_ = None if args.validation_data: self.logger.info(f'''Loading validation dataset from {args.validation_data}''' ) lowerCamelCase_ = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowerCamelCase_ = args.validation_split lowerCamelCase_ = args.train_batch_size lowerCamelCase_ = args.valid_batch_size lowerCamelCase_ = args.learning_rate lowerCamelCase_ = args.adam_epsilon def snake_case ( self ): """simple docstring""" if self.framework == "tf": return self.run_tf() return self.run_torch() def snake_case ( self ): """simple docstring""" raise NotImplementedError def snake_case ( self ): """simple docstring""" self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
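# A minimal, self-contained sketch of the subparser registration pattern the
# train command above relies on; the "train" subcommand and its single flag
# are assumptions for illustration, not the real transformers CLI surface.
from argparse import ArgumentParser

demo_parser = ArgumentParser("demo-cli")
demo_subparsers = demo_parser.add_subparsers()
demo_train = demo_subparsers.add_parser("train", help="Train a model on a task.")
demo_train.add_argument("--train_batch_size", type=int, default=32)
demo_train.set_defaults(func=lambda args: print(f"batch={args.train_batch_size}"))

demo_args = demo_parser.parse_args(["train", "--train_batch_size", "16"])
demo_args.func(demo_args)  # prints: batch=16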
55
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging a_ : Any = logging.get_logger(__name__) a_ : Optional[Any] = {"""vocab_file""": """spiece.model"""} a_ : Tuple = { """vocab_file""": { """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""", } } class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="<unk>" , UpperCamelCase="<sep>" , UpperCamelCase="<pad>" , UpperCamelCase="<cls>" , UpperCamelCase="<mask>" , UpperCamelCase=["<eop>", "<eod>"] , UpperCamelCase = None , **UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , ) lowerCamelCase_ = 3 lowerCamelCase_ = do_lower_case lowerCamelCase_ = remove_space lowerCamelCase_ = keep_accents lowerCamelCase_ = vocab_file lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." 
) lowerCamelCase_ = jieba lowerCamelCase_ = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def snake_case ( self ): """simple docstring""" return len(self.sp_model ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCamelCase_ = self.__dict__.copy() lowerCamelCase_ = None return state def __setstate__( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase_ = {} lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case ( self , UpperCamelCase ): """simple docstring""" if self.remove_space: lowerCamelCase_ = " ".join(inputs.strip().split() ) else: lowerCamelCase_ = inputs lowerCamelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase_ = unicodedata.normalize("NFKD" , UpperCamelCase ) lowerCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase )] ) if self.do_lower_case: lowerCamelCase_ = outputs.lower() return outputs def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.preprocess_text(UpperCamelCase ) lowerCamelCase_ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) lowerCamelCase_ = [] for piece in pieces: if len(UpperCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase_ = cur_pieces[1:] else: lowerCamelCase_ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase ) else: new_pieces.append(UpperCamelCase ) return new_pieces def snake_case ( self , UpperCamelCase ): """simple docstring""" return self.sp_model.PieceToId(UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" return self.sp_model.IdToPiece(UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "".join(UpperCamelCase ).replace(UpperCamelCase , " " ).strip() return out_string def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is not None: return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1, 1] return ([0] * len(UpperCamelCase )) + [1, 1] def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def snake_case ( self , UpperCamelCase , UpperCamelCase 
= None ): """simple docstring""" if not os.path.isdir(UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ = os.path.join( UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , "wb" ) as fi: lowerCamelCase_ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = super()._decode(*UpperCamelCase , **UpperCamelCase ) lowerCamelCase_ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
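# The whitespace round-trip the tokenizer above sets up with str.maketrans,
# sketched on a plain string: spaces and newlines are mapped to \u2582/\u2583
# before encoding and restored in _decode.
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "Hello world\nBye".translate(translator)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "Hello world\nBye"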
55
1
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Dict: '''simple docstring''' A__ = checkpoint A__ = {} A__ = vae_state_dict['encoder.conv_in.weight'] A__ = vae_state_dict['encoder.conv_in.bias'] A__ = vae_state_dict['encoder.conv_out.weight'] A__ = vae_state_dict['encoder.conv_out.bias'] A__ = vae_state_dict['encoder.norm_out.weight'] A__ = vae_state_dict['encoder.norm_out.bias'] A__ = vae_state_dict['decoder.conv_in.weight'] A__ = vae_state_dict['decoder.conv_in.bias'] A__ = vae_state_dict['decoder.conv_out.weight'] A__ = vae_state_dict['decoder.conv_out.bias'] A__ = vae_state_dict['decoder.norm_out.weight'] A__ = vae_state_dict['decoder.norm_out.bias'] A__ = vae_state_dict['quant_conv.weight'] A__ = vae_state_dict['quant_conv.bias'] A__ = vae_state_dict['post_quant_conv.weight'] A__ = vae_state_dict['post_quant_conv.bias'] # Retrieves the keys for the encoder down blocks only A__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} ) A__ = { layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE__ ) } # Retrieves the keys for the decoder up blocks only A__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} ) A__ = { layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE__ ) } for i in range(SCREAMING_SNAKE_CASE__ ): A__ = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key] if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: A__ = vae_state_dict.pop( f'encoder.down.{i}.downsample.conv.weight' ) A__ = vae_state_dict.pop( f'encoder.down.{i}.downsample.conv.bias' ) A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ ) A__ = {'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets'} assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ ) A__ = [key for key in vae_state_dict if 'encoder.mid.block' in key] A__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): A__ = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key] A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ ) A__ = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ ) A__ = [key for key in vae_state_dict if 'encoder.mid.attn' in key] A__ = renew_vae_attention_paths(SCREAMING_SNAKE_CASE__ ) A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ ) conv_attn_to_linear(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ): A__ = num_up_blocks - 1 - i A__ = [ key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key ] if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: A__ = vae_state_dict[ 
f'decoder.up.{block_id}.upsample.conv.weight' ] A__ = vae_state_dict[ f'decoder.up.{block_id}.upsample.conv.bias' ] A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ ) A__ = {'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets'} assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ ) A__ = [key for key in vae_state_dict if 'decoder.mid.block' in key] A__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): A__ = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key] A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ ) A__ = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ ) A__ = [key for key in vae_state_dict if 'decoder.mid.attn' in key] A__ = renew_vae_attention_paths(SCREAMING_SNAKE_CASE__ ) A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ ) conv_attn_to_linear(SCREAMING_SNAKE_CASE__ ) return new_checkpoint def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , ) -> str: '''simple docstring''' A__ = requests.get( ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' ) A__ = io.BytesIO(r.content ) A__ = OmegaConf.load(SCREAMING_SNAKE_CASE__ ) A__ = 512 A__ = 'cuda' if torch.cuda.is_available() else 'cpu' if checkpoint_path.endswith('safetensors' ): from safetensors import safe_open A__ = {} with safe_open(SCREAMING_SNAKE_CASE__ , framework='pt' , device='cpu' ) as f: for key in f.keys(): A__ = f.get_tensor(SCREAMING_SNAKE_CASE__ ) else: A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )['state_dict'] # Convert the VAE model. A__ = create_vae_diffusers_config(SCREAMING_SNAKE_CASE__ , image_size=SCREAMING_SNAKE_CASE__ ) A__ = custom_convert_ldm_vae_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = AutoencoderKL(**SCREAMING_SNAKE_CASE__ ) vae.load_state_dict(SCREAMING_SNAKE_CASE__ ) vae.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") lowercase_ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
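# The core move of the conversion above, in miniature: rename checkpoint keys
# via a mapping table before loading them into the diffusers module. The keys
# here are illustrative, not the real VAE layout.
import torch

demo_sd = {"encoder.norm_out.weight": torch.ones(4), "quant_conv.bias": torch.zeros(4)}
demo_rename = {"encoder.norm_out.weight": "encoder.conv_norm_out.weight"}
demo_new_sd = {demo_rename.get(k, k): v for k, v in demo_sd.items()}
print(sorted(demo_new_sd))  # ['encoder.conv_norm_out.weight', 'quant_conv.bias']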
356
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
282
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self) -> List[str]: __UpperCamelCase :int = tempfile.mkdtemp() # fmt: off __UpperCamelCase :int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __UpperCamelCase :Tuple = dict(zip(__lowercase , range(len(__lowercase)))) __UpperCamelCase :List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __UpperCamelCase :Optional[Any] = {'''unk_token''': '''<unk>'''} __UpperCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) __UpperCamelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp: fp.write(json.dumps(__lowercase) + '''\n''') with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp: fp.write('''\n'''.join(__lowercase)) __UpperCamelCase :List[str] = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } __UpperCamelCase :Any = os.path.join(self.tmpdirname , __lowercase) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(__lowercase , __lowercase) def UpperCamelCase__ ( self , **__lowercase) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase) def UpperCamelCase__ ( self , **__lowercase) -> Any: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase) def UpperCamelCase__ ( self , **__lowercase) -> List[str]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase) def UpperCamelCase__ ( self) -> List[str]: shutil.rmtree(self.tmpdirname) def UpperCamelCase__ ( self) -> Tuple: __UpperCamelCase :Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] __UpperCamelCase :int = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1)) for x in image_inputs] return image_inputs def UpperCamelCase__ ( self) -> Tuple: __UpperCamelCase :Dict = self.get_tokenizer() __UpperCamelCase :str = self.get_rust_tokenizer() __UpperCamelCase :Optional[Any] = self.get_image_processor() __UpperCamelCase :List[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) processor_slow.save_pretrained(self.tmpdirname) __UpperCamelCase :Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase) __UpperCamelCase :Union[str, Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) processor_fast.save_pretrained(self.tmpdirname) __UpperCamelCase :Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) 
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , __lowercase) self.assertIsInstance(processor_fast.tokenizer , __lowercase) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , __lowercase) self.assertIsInstance(processor_fast.image_processor , __lowercase) def UpperCamelCase__ ( self) -> Union[str, Any]: __UpperCamelCase :Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) __UpperCamelCase :int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') __UpperCamelCase :List[Any] = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0) __UpperCamelCase :Optional[Any] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , __lowercase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __lowercase) def UpperCamelCase__ ( self) -> Optional[Any]: __UpperCamelCase :Optional[int] = self.get_image_processor() __UpperCamelCase :Optional[Any] = self.get_tokenizer() __UpperCamelCase :Dict = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) __UpperCamelCase :Dict = self.prepare_image_inputs() __UpperCamelCase :Union[str, Any] = image_processor(__lowercase , return_tensors='''np''') __UpperCamelCase :str = processor(images=__lowercase , return_tensors='''np''') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2) def UpperCamelCase__ ( self) -> Tuple: __UpperCamelCase :Tuple = self.get_image_processor() __UpperCamelCase :Dict = self.get_tokenizer() __UpperCamelCase :str = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) __UpperCamelCase :Tuple = '''lower newer''' __UpperCamelCase :str = processor(text=__lowercase) __UpperCamelCase :Optional[Any] = tokenizer(__lowercase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def UpperCamelCase__ ( self) -> Any: __UpperCamelCase :str = self.get_image_processor() __UpperCamelCase :Tuple = self.get_tokenizer() __UpperCamelCase :Optional[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) __UpperCamelCase :Optional[int] = '''lower newer''' __UpperCamelCase :str = self.prepare_image_inputs() __UpperCamelCase :Any = processor(text=__lowercase , images=__lowercase) self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values''']) # test if it raises when no input is passed with pytest.raises(__lowercase): processor() def UpperCamelCase__ ( self) -> Any: __UpperCamelCase :str = self.get_image_processor() __UpperCamelCase :Tuple = self.get_tokenizer() __UpperCamelCase :List[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) __UpperCamelCase :Optional[Any] = self.prepare_image_inputs() __UpperCamelCase :Optional[Any] = 
self.prepare_image_inputs() __UpperCamelCase :List[Any] = processor(images=__lowercase , visual_prompt=__lowercase) self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''conditional_pixel_values''']) # test if it raises when no input is passed with pytest.raises(__lowercase): processor() def UpperCamelCase__ ( self) -> Dict: __UpperCamelCase :Optional[Any] = self.get_image_processor() __UpperCamelCase :List[str] = self.get_tokenizer() __UpperCamelCase :Optional[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase) __UpperCamelCase :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __UpperCamelCase :Optional[Any] = processor.batch_decode(__lowercase) __UpperCamelCase :Any = tokenizer.batch_decode(__lowercase) self.assertListEqual(__lowercase , __lowercase)
43
'''simple docstring'''
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]):
        if len(coefficients) != degree + 1:
            raise ValueError('The number of coefficients should be equal to the degree + 1.')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial):
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float):
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0):
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object):
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object):
        return not self.__eq__(polynomial_a)
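# Worked check of the evaluation and derivative rules implemented above,
# done with plain lists so it runs independently of the class.
coeffs = [3.0, 2.0, 1.0]  # 3 + 2x + x^2
x = 2.0
value = sum(c * x**i for i, c in enumerate(coeffs))      # 3 + 4 + 4 = 11.0
deriv = [c * (i + 1) for i, c in enumerate(coeffs[1:])]  # d/dx -> 2 + 2x
print(value, deriv)  # 11.0 [2.0, 2.0]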
83
0
'''simple docstring'''
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
183
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def lowercase__(__UpperCamelCase) -> int:
    UpperCamelCase = prime_factors(__UpperCamelCase)
    if is_square_free(UpperCamelCase):
        return -1 if len(UpperCamelCase) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
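# Worked example of the Möbius rule coded above: mu(n) is 0 when n has a
# squared prime factor, otherwise (-1) ** (number of prime factors).
# `demo_mobius` takes a precomputed factorization so it needs no imports.
def demo_mobius(factors):
    if len(set(factors)) != len(factors):  # a repeated factor => not square-free
        return 0
    return -1 if len(factors) % 2 else 1

print(demo_mobius([2, 3, 5]))  # 30 = 2*3*5 is square-free, 3 factors -> -1
print(demo_mobius([2, 2, 3]))  # 12 has 2^2 -> 0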
183
1
import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def __A ( __lowerCamelCase ) -> Tuple: a = torch.load(__lowerCamelCase , map_location="""cpu""" ) if "model" in sd.keys(): a = torch.load(__lowerCamelCase , map_location="""cpu""" )["""model"""] # pop unnecessary weights a = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(__lowerCamelCase ) a = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: a = sd.pop(__lowerCamelCase ) a = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: a = sd[key] # We split QKV in separate Q,K,V a = key.replace(""".qkv_proj.""" , """.q_proj.""" ) a = key.replace(""".qkv_proj.""" , """.k_proj.""" ) a = key.replace(""".qkv_proj.""" , """.v_proj.""" ) a = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 a , a , a = torch.split(__lowerCamelCase , depth // 3 , dim=0 ) a = q a = k a = v del sd[key] return sd @torch.no_grad() def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> List[Any]: a = load_checkpoint(__lowerCamelCase ) if config is not None: a = OPTConfig.from_pretrained(__lowerCamelCase ) else: a = OPTConfig() a = OPTModel(__lowerCamelCase ).half().eval() model.load_state_dict(__lowerCamelCase ) # Check results Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") __UpperCamelCase : int = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
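# The fused-QKV split performed above, reduced to its essentials: one
# (3*d, d) projection is cut into equal Q, K, V chunks along dim 0.
import torch

d = 4
qkv = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
q, k, v = torch.split(qkv, qkv.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (d, d)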
228
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Optional[Any] = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
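# The lazy-import idea behind _LazyModule above, sketched in a few lines:
# the real module is resolved only on first attribute access. This demo
# class is illustrative, not the transformers implementation.
import importlib

class DemoLazyModule:
    def __init__(self, name):
        self._name, self._mod = name, None

    def __getattr__(self, attr):
        if self._mod is None:
            self._mod = importlib.import_module(self._name)
        return getattr(self._mod, attr)

lazy_json = DemoLazyModule("json")
print(lazy_json.dumps({"lazy": True}))  # json is imported only here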
228
1
'''simple docstring''' import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : List[Any] = PhobertTokenizer lowerCAmelCase_ : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@'''] UpperCAmelCase__ = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) UpperCAmelCase__ = ['''#version: 0.2''', '''l à</w>'''] UpperCAmelCase__ = {'''unk_token''': '''<unk>'''} UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]}\n''' ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowercase ) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple , **_UpperCAmelCase : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = '''Tôi là VinAI Research''' UpperCAmelCase__ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>''' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase__ = '''Tôi là VinAI Research''' UpperCAmelCase__ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split() UpperCAmelCase__ = tokenizer.tokenize(__lowercase ) print(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) UpperCAmelCase__ = tokens + [tokenizer.unk_token] UpperCAmelCase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
366
'''simple docstring'''
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class lowerCAmelCase_(enum.Enum):
    '''simple docstring'''
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    '''simple docstring'''
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    '''simple docstring'''
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    '''simple docstring'''
    forceWrite("\r")


def move_cursor(num_lines, direction):
    '''simple docstring'''
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    '''simple docstring'''
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    '''simple docstring'''
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
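# Example of the escape sequence move_cursor builds above: "\033[2A" moves
# the cursor two lines up (repr shown instead of actually moving it).
demo_seq = f"\033[{2}{CURSOR_TO_CHAR['UP']}"
print(repr(demo_seq))  # '\x1b[2A'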
61
0
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class snake_case_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case__( self : int ) ->int: snake_case_ = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) snake_case_ = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(_UpperCamelCase ) from datasets import load_dataset snake_case_ = load_dataset('''nielsr/rvlcdip-demo''' ) snake_case_ = dataset['''train'''][0]['''image'''].convert('''RGB''' ) snake_case_ = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) # forward pass with torch.no_grad(): snake_case_ = model(**_UpperCamelCase ) snake_case_ = outputs.logits snake_case_ = torch.Size((1, 1_6) ) self.assertEqual(logits.shape , _UpperCamelCase ) snake_case_ = torch.tensor( [-0.4158, -0.4092, -0.4347] , device=_UpperCamelCase , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
8
'''simple docstring''' import importlib.metadata import operator import re import sys from typing import Optional from packaging import version lowercase_ = { """<""": operator.lt, """<=""": operator.le, """==""": operator.eq, """!=""": operator.ne, """>=""": operator.ge, """>""": operator.gt, } def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ) ->Tuple: if got_ver is None or want_ver is None: raise ValueError( F'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider' F' reinstalling {pkg}.' ) if not ops[op](version.parse(__lowerCamelCase ) , version.parse(__lowerCamelCase ) ): raise ImportError( F'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' ) def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) ->None: _SCREAMING_SNAKE_CASE = F'\n{hint}' if hint is not None else """""" # non-versioned check if re.match(R"""^[\w_\-\d]+$""" , __lowerCamelCase ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = requirement, None, None else: _SCREAMING_SNAKE_CASE = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , __lowerCamelCase ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but""" F' got {requirement}' ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = match[0] _SCREAMING_SNAKE_CASE = want_full.split(""",""" ) # there could be multiple requirements _SCREAMING_SNAKE_CASE = {} for w in want_range: _SCREAMING_SNAKE_CASE = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , __lowerCamelCase ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,""" F' but got {requirement}' ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = match[0] _SCREAMING_SNAKE_CASE = want_ver if op not in ops: raise ValueError(F'{requirement}: need one of {list(ops.keys() )}, but got {op}' ) # special case if pkg == "python": _SCREAMING_SNAKE_CASE = """.""".join([str(__lowerCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) return # check if any version is installed try: _SCREAMING_SNAKE_CASE = importlib.metadata.version(__lowerCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F'The \'{requirement}\' distribution was not found and is required by this application. {hint}' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : Union[str, Any] ) ->str: _SCREAMING_SNAKE_CASE = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main""" return require_version(__lowerCamelCase , __lowerCamelCase )
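# The operator-table comparison require_version performs above, in isolation;
# the version strings here are arbitrary examples.
import operator
from packaging import version

demo_ops = {">=": operator.ge, "==": operator.eq}
got_ver, want_ver, op = "4.30.2", "4.26.0", ">="
assert demo_ops[op](version.parse(got_ver), version.parse(want_ver))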
58
0
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """simple docstring"""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
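# The fp32 -> fp16 pass above on a toy state dict, runnable without a
# checkpoint on disk.
import torch

demo_sd = {"w": torch.randn(2, 2), "b": torch.randn(2)}
demo_sd = {k: v.half() for k, v in demo_sd.items()}
assert all(v.dtype == torch.float16 for v in demo_sd.values())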
356
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ : Optional[Any] = 128022 snake_case_ : Optional[int] = 128028 @require_sentencepiece class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[str] = MaMaaaTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = True def lowerCamelCase ( self : str): """simple docstring""" super().setUp() UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = Path(self.tmpdirname) save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file''']) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]): """simple docstring""" return ( "This is a test", "This is a test", ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = '''</s>''' UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer.tokenize('''This is a test''') self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case) self.assertEqual(_snake_case , '''This is a test''') @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 
207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Dict = '''facebook/m2m100_418M''' UpperCAmelCase__ : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCAmelCase__ : Dict = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''') UpperCAmelCase_ = 1 return cls def lowerCamelCase ( self : List[Any]): 
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
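# The assertions above all revolve around one invariant: M2M100-style
# tokenizers prepend a language-code token and append EOS. A minimal sketch of
# that wrapping, using only ids that appear in the tests above (en -> 128022,
# EOS -> 2); the real mapping lives in the tokenizer's lang_token_to_id table.
LANG_TOKEN_TO_ID = {"en": 128022, "ar": 128006, "ro": 128076, "mr": 128063}
EOS_TOKEN_ID = 2


def wrap_with_lang_code(token_ids, src_lang):
    """Prefix the language code and suffix EOS, as prefix_tokens/suffix_tokens do."""
    return [LANG_TOKEN_TO_ID[src_lang]] + list(token_ids) + [EOS_TOKEN_ID]


# Matches the _build_translation_inputs expectation: en_XX, "A", "test", EOS.
assert wrap_with_lang_code([58, 4183], "en") == [128022, 58, 4183, 2]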
7
0
"""simple docstring""" import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : nn.ModuleList , snake_case__ : nn.ModuleList , snake_case__ : List[int] ): A = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(__lowercase ) == len(__lowercase ), F'{len(__lowercase )} != {len(__lowercase )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) _lowercase = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } _lowercase = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] ): try: A = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(__lowercase ) ) def _snake_case ( snake_case__ : Dict , snake_case__ : Optional[Any] ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(__lowercase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _snake_case ( snake_case__ : Union[str, PreTrainedModel] , snake_case__ : Union[str, Path] = "student" , snake_case__ : Union[int, None] = None , snake_case__ : Union[int, None] = None , snake_case__ : List[str]=False , snake_case__ : Tuple=None , snake_case__ : str=None , **snake_case__ : Any , ): A = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' 
assert (e is not None) or (d is not None), _msg if isinstance(__lowercase , __lowercase ): AutoTokenizer.from_pretrained(__lowercase ).save_pretrained(__lowercase ) # purely for convenience A = AutoModelForSeqaSeqLM.from_pretrained(__lowercase ).eval() else: assert isinstance(__lowercase , __lowercase ), F'teacher must be a model or string got type {type(__lowercase )}' A = teacher.config.to_diff_dict() try: A , A = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: A = teacher_e if d is None: A = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} ) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers' ): A , A = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: A , A = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: A = teacher_e if d is None: A = teacher_d if hasattr(teacher.config , 'num_encoder_layers' ): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} ) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__lowercase ) # Copy weights A = teacher.config_class(**__lowercase ) A = AutoModelForSeqaSeqLM.from_config(__lowercase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. A = student.load_state_dict(teacher.state_dict() , strict=__lowercase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save A , A = list(range(__lowercase ) ), list(range(__lowercase ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(__lowercase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: A = pick_layers_to_copy(__lowercase , __lowercase ) if d_layers_to_copy is None: A = pick_layers_to_copy(__lowercase , __lowercase ) try: if hasattr( __lowercase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowercase ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowercase ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowercase ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowercase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , __lowercase ) copy_layers(teacher.decoder.block , student.decoder.block , __lowercase ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) A = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(__lowercase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
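# The LAYERS_TO_COPY tables above hand-pick roughly evenly spaced teacher
# layers while keeping the first and last one. A hedged sketch of the same
# spacing rule for arbitrary sizes (an illustration only: the script itself
# uses the hand-tuned tables and falls back to the first n_student layers):
def spread_layers(n_teacher: int, n_student: int) -> list:
    """Pick n_student layer indices spread evenly over range(n_teacher)."""
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    # the set deduplicates indices that round to the same layer
    return sorted({round(i * step) for i in range(n_student)})


assert spread_layers(12, 4) == [0, 4, 7, 11]  # close to the table's [0, 4, 8, 11]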
74
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: _lowerCamelCase : int = None _lowerCamelCase : List[str] = logging.get_logger(__name__) _lowerCamelCase : Tuple = '''▁''' _lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCamelCase : Any = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}, '''tokenizer_file''': { '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json''' }, } _lowerCamelCase : Optional[int] = { '''google/pegasus-xsum''': 512, } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' _UpperCAmelCase : int = VOCAB_FILES_NAMES _UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Any = PegasusTokenizer _UpperCAmelCase : Dict = ["input_ids", "attention_mask"] def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ): '''simple docstring''' _snake_case = offset if additional_special_tokens is not None: if not isinstance(lowercase , lowercase ): raise TypeError( f'''additional_special_tokens should be of type {type(lowercase )}, but is''' f''' {type(lowercase )}''' ) _snake_case = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 ) ] if len(set(lowercase ) ) != len(lowercase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) _snake_case = additional_special_tokens_extended else: _snake_case = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , ) _snake_case = vocab_file _snake_case = False if not self.vocab_file else True def A ( self : List[str] , lowercase : Optional[int] ): '''simple docstring''' _snake_case = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( 'There should be 3 special tokens: mask_token, pad_token, and eos_token +' f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ): '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(lowercase ) elif token_ids_a is None: return self._special_token_mask(lowercase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def A ( self : Any , lowercase : Tuple , lowercase : Any=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def A ( self : int , lowercase : str , lowercase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowercase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _snake_case = os.path.join( lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ): copyfile(self.vocab_file , lowercase ) return (out_vocab_file,)
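# _special_token_mask above reduces to a set-membership test with <unk>
# excluded. A self-contained sketch with hypothetical ids (pad=0, eos=1,
# mask=2, unk=3; the real ids come from the tokenizer):
def special_token_mask(seq, all_special_ids, unk_token_id=3):
    ids = set(all_special_ids)
    ids.discard(unk_token_id)  # <unk> is only sometimes special
    return [1 if x in ids else 0 for x in seq]


assert special_token_mask([0, 105, 7, 1], all_special_ids={0, 1, 2, 3}) == [1, 0, 0, 1]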
282
0
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _UpperCamelCase = re.compile("""[^A-Za-z_0-9]""") # parameters used in DuplicationIndex _UpperCamelCase = 10 _UpperCamelCase = 256 def _a ( _snake_case ): """simple docstring""" if len(_snake_case ) < MIN_NUM_TOKENS: return None UpperCAmelCase = MinHash(num_perm=_snake_case ) for token in set(_snake_case ): min_hash.update(token.encode() ) return min_hash def _a ( _snake_case ): """simple docstring""" return {t for t in NON_ALPHA.split(_snake_case ) if len(t.strip() ) > 0} class lowerCamelCase__ : def __init__( self ,*, A = 0.85 ,): UpperCAmelCase = duplication_jaccard_threshold UpperCAmelCase = NUM_PERM UpperCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm ) UpperCAmelCase = defaultdict(A ) def _UpperCamelCase ( self ,A ,A ): UpperCAmelCase = self._index.query(A ) if code_key in self._index.keys: print(F'''Duplicate key {code_key}''' ) return self._index.insert(A ,A ) if len(A ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(A ) break else: self._duplicate_clusters[close_duplicates[0]].add(A ) def _UpperCamelCase ( self ): UpperCAmelCase = [] for base, duplicates in self._duplicate_clusters.items(): UpperCAmelCase = [base] + list(A ) # reformat the cluster to be a list of dict UpperCAmelCase = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(A ) return duplicate_clusters def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.get_duplicate_clusters() with open(A ,"""w""" ) as f: json.dump(A ,A ) def _a ( _snake_case ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = element UpperCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def _a ( _snake_case ): """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(_snake_case , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_snake_case ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_snake_case ) ) , max_queue_size=100 ) ): di.add(_snake_case , _snake_case ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = get_tokens(_snake_case ) UpperCAmelCase = get_tokens(_snake_case ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _UpperCamelCase = None def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = [] for elementa in cluster: UpperCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: UpperCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(_snake_case , _snake_case ) >= jaccard_threshold: elementa["copies"] += 1 break else: UpperCAmelCase = 1 extremes.append(_snake_case ) return extremes def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" global _shared_dataset UpperCAmelCase = dataset UpperCAmelCase = [] UpperCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_snake_case ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _snake_case , _snake_case , ) , total=len(_snake_case ) , ): extremes_list.append(_snake_case ) return extremes_list def _a ( _snake_case , _snake_case = 0.85 ): """simple docstring""" UpperCAmelCase = make_duplicate_clusters(_snake_case , _snake_case ) UpperCAmelCase = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} UpperCAmelCase = {} UpperCAmelCase = find_extremes(_snake_case , _snake_case , _snake_case ) for extremes in extremes_clusters: for element in extremes: UpperCAmelCase = element UpperCAmelCase = duplicate_indices - set(extreme_dict.keys() ) UpperCAmelCase = dataset.filter(lambda _snake_case , _snake_case : idx not in remove_indices , with_indices=_snake_case ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: UpperCAmelCase = element["""base_index"""] in extreme_dict if element["is_extreme"]: UpperCAmelCase = extreme_dict[element["""base_index"""]]["""copies"""] print(F'''Original dataset size: {len(_snake_case )}''' ) print(F'''Number of duplicate clusters: {len(_snake_case )}''' ) print(F'''Files in duplicate cluster: {len(_snake_case )}''' ) print(F'''Unique files in duplicate cluster: {len(_snake_case )}''' ) print(F'''Filtered dataset size: {len(_snake_case )}''' ) return ds_filter, duplicate_clusters
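# The pipeline above boils down to: tokenize on non-alphanumeric characters,
# MinHash every file, bucket near-duplicate candidates with LSH, then confirm
# candidates with exact Jaccard similarity. The exact-Jaccard step, isolated
# and runnable on its own:
import re

NON_ALPHA = re.compile(r"[^A-Za-z_0-9]")


def get_tokens(code: str) -> set:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


def jaccard(code_a: str, code_b: str) -> float:
    tokens_a, tokens_b = get_tokens(code_a), get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)


# {def, f, return} shared out of {def, f, x, y, return} total
assert jaccard("def f(x): return x", "def f(y): return y") == 3 / 5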
366
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def _a ( _snake_case ): """simple docstring""" if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_snake_case ): return [[videos]] raise ValueError(F'''Could not make batched video from {videos}''' ) class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''pixel_values'''] def __init__( self ,A = True ,A = None ,A = PILImageResampling.BILINEAR ,A = True ,A = None ,A = True ,A = 1 / 255 ,A = True ,A = True ,A = None ,A = None ,**A ,): super().__init__(**A ) UpperCAmelCase = size if size is not None else {"""shortest_edge""": 256} UpperCAmelCase = get_size_dict(A ,default_to_square=A ) UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase = get_size_dict(A ,param_name="""crop_size""" ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCamelCase ( self ,A ,A ,A = PILImageResampling.BILINEAR ,A = None ,**A ,): UpperCAmelCase = get_size_dict(A ,default_to_square=A ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(A ,size["""shortest_edge"""] ,default_to_square=A ) elif "height" in size and "width" in size: UpperCAmelCase = (size["""height"""], size["""width"""]) else: raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) return resize(A ,size=A ,resample=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A = None ,**A ,): UpperCAmelCase = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(F'''Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}''' ) return center_crop(A ,size=(size["""height"""], size["""width"""]) ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A = True ,A = None ,**A ,): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(A ,scale=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A ,A = None ,**A ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,): if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. UpperCAmelCase = to_numpy_array(A ) if do_resize: UpperCAmelCase = self.resize(image=A ,size=A ,resample=A ) if do_center_crop: UpperCAmelCase = self.center_crop(A ,size=A ) if do_rescale: UpperCAmelCase = self.rescale(image=A ,scale=A ,offset=A ) if do_normalize: UpperCAmelCase = self.normalize(image=A ,mean=A ,std=A ) UpperCAmelCase = to_channel_dimension_format(A ,A ) return image def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,**A ,): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(A ,default_to_square=A ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(A ,param_name="""crop_size""" ) if not valid_images(A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) UpperCAmelCase = make_batched(A ) UpperCAmelCase = [ [ self._preprocess_image( image=A ,do_resize=A ,size=A ,resample=A ,do_center_crop=A ,crop_size=A ,do_rescale=A ,rescale_factor=A ,offset=A ,do_normalize=A ,image_mean=A ,image_std=A ,data_format=A ,) for img in video ] for video in videos ] UpperCAmelCase = {"""pixel_values""": videos} return BatchFeature(data=A ,tensor_type=A )
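# make_batched at the top of this file accepts a single frame, a list of
# frames (one video), or a list of videos, and always returns
# list-of-videos-of-frames. A pure-numpy sketch of that normalization
# (np.ndarray stands in for "valid image"):
import numpy as np


def make_batched_sketch(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos  # already [[frame, ...], ...]
    if isinstance(videos, (list, tuple)):
        return [videos]  # one video: [frame, ...]
    return [[videos]]  # a single frame


frame = np.zeros((224, 224, 3))
assert len(make_batched_sketch(frame)) == 1  # one video of one frame
assert len(make_batched_sketch([frame, frame])[0]) == 2  # one video, two frames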
234
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Tuple = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class a ( __snake_case ): SCREAMING_SNAKE_CASE : Optional[Any] = """camembert""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[Any]=30522 , __SCREAMING_SNAKE_CASE : List[str]=768 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : Tuple=3072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=512 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1e-1_2 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : List[str]="absolute" , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any: super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = hidden_act lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = position_embedding_type lowerCamelCase_ = use_cache lowerCamelCase_ = classifier_dropout class a ( __snake_case ): @property def UpperCamelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCamelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCamelCase_ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
183
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _SCREAMING_SNAKE_CASE : Tuple = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize _SCREAMING_SNAKE_CASE : Tuple = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' _SCREAMING_SNAKE_CASE : int = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' _SCREAMING_SNAKE_CASE : List[Any] = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. 
Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def UpperCamelCase ( self : List[str] ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]=0.9 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.5 ) -> int: if NLTK_VERSION >= version.Version('3.6.5' ): lowerCamelCase_ = [ meteor_score.single_meteor_score( word_tokenize(__SCREAMING_SNAKE_CASE ) , word_tokenize(__SCREAMING_SNAKE_CASE ) , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , gamma=__SCREAMING_SNAKE_CASE ) for ref, pred in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ] else: lowerCamelCase_ = [ meteor_score.single_meteor_score(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , alpha=__SCREAMING_SNAKE_CASE , beta=__SCREAMING_SNAKE_CASE , gamma=__SCREAMING_SNAKE_CASE ) for ref, pred in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ] return {"meteor": np.mean(__SCREAMING_SNAKE_CASE )}
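# A minimal direct use of the nltk scorer this metric wraps (assumes nltk plus
# its wordnet/punkt data, which the metric's prepare step downloads; nltk
# >= 3.6.5 expects pre-tokenized inputs, as the branch above shows):
from nltk import word_tokenize
from nltk.translate import meteor_score

reference = "It is a guide to action that ensures that the military will forever heed Party commands"
prediction = "It is a guide to action which ensures that the military always obeys the commands of the party"
score = meteor_score.single_meteor_score(
    word_tokenize(reference), word_tokenize(prediction), alpha=0.9, beta=3, gamma=0.5
)
print(round(score, 4))  # expected ~0.6944, per the metric's docstring example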
183
1
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Any = DownBlockaD # noqa F405 a : Tuple = "down" def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Optional[int] = ResnetDownsampleBlockaD # noqa F405 a : Union[str, Any] = "down" def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' __lowercase = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Union[str, Any] = AttnDownBlockaD # noqa F405 a : Union[str, Any] = "down" def _UpperCAmelCase (self ) -> str: '''simple docstring''' __lowercase = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : List[str] = CrossAttnDownBlockaD # noqa F405 a : Tuple = "down" def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' __lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common() __lowercase = 32 return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' __lowercase = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405 a : int = "down" @property def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Tuple: '''simple docstring''' __lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common() __lowercase = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' ) def _UpperCAmelCase (self ) -> Any: '''simple docstring''' __lowercase = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : List[Any] = SkipDownBlockaD # noqa F405 a : Tuple = "down" @property def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' return super().get_dummy_input(include_skip_sample=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Any: '''simple docstring''' __lowercase = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Optional[Any] = AttnSkipDownBlockaD # noqa F405 a : List[Any] = "down" @property def _UpperCAmelCase (self ) -> List[Any]: '''simple docstring''' return super().get_dummy_input(include_skip_sample=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Optional[int]: '''simple 
docstring''' __lowercase = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Optional[Any] = DownEncoderBlockaD # noqa F405 a : str = "down" @property def _UpperCAmelCase (self ) -> Optional[Any]: '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' __lowercase = { '''in_channels''': 32, '''out_channels''': 32, } __lowercase = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' __lowercase = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405 a : Any = "down" @property def _UpperCAmelCase (self ) -> int: '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' __lowercase = { '''in_channels''': 32, '''out_channels''': 32, } __lowercase = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : str = UNetMidBlockaD # noqa F405 a : int = "mid" def _UpperCAmelCase (self ) -> Tuple: '''simple docstring''' __lowercase = { '''in_channels''': 32, '''temb_channels''': 128, } __lowercase = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : int = UNetMidBlockaDCrossAttn # noqa F405 a : Dict = "mid" def _UpperCAmelCase (self ) -> str: '''simple docstring''' __lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common() __lowercase = 32 return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' __lowercase = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : List[str] = UNetMidBlockaDSimpleCrossAttn # noqa F405 a : Optional[Any] = "mid" @property def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=_lowerCamelCase ) def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' __lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common() __lowercase = 32 return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Optional[int] = UpBlockaD # noqa F405 a : Optional[Any] = 
"up" @property def _UpperCAmelCase (self ) -> str: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _UpperCAmelCase (self ) -> str: '''simple docstring''' __lowercase = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Any = ResnetUpsampleBlockaD # noqa F405 a : Optional[int] = "up" @property def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : int = CrossAttnUpBlockaD # noqa F405 a : Dict = "up" @property def _UpperCAmelCase (self ) -> Any: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' __lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common() __lowercase = 32 return init_dict, inputs_dict def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' __lowercase = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Optional[int] = SimpleCrossAttnUpBlockaD # noqa F405 a : Optional[Any] = "up" @property def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ,include_encoder_hidden_states=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Tuple: '''simple docstring''' __lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common() __lowercase = 32 return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Tuple: '''simple docstring''' __lowercase = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Union[str, Any] = AttnUpBlockaD # noqa F405 a : Optional[Any] = "up" @property def _UpperCAmelCase (self ) -> Any: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) @unittest.skipIf(torch_device == '''mps''' ,'''MPS result is not consistent''' ) def _UpperCAmelCase (self ) -> Optional[Any]: '''simple docstring''' __lowercase = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : str = SkipUpBlockaD # noqa F405 a : str = "up" @property def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Any: '''simple docstring''' __lowercase = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase 
): '''simple docstring''' a : Union[str, Any] = AttnSkipUpBlockaD # noqa F405 a : List[str] = "up" @property def _UpperCAmelCase (self ) -> Tuple: '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : List[Any] = UpDecoderBlockaD # noqa F405 a : List[Any] = "up" @property def _UpperCAmelCase (self ) -> List[Any]: '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Tuple: '''simple docstring''' __lowercase = {'''in_channels''': 32, '''out_channels''': 32} __lowercase = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase (self ) -> str: '''simple docstring''' __lowercase = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7] super().test_output(_lowerCamelCase ) class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : Optional[int] = AttnUpDecoderBlockaD # noqa F405 a : Union[str, Any] = "up" @property def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' return super().get_dummy_input(include_temb=_lowerCamelCase ) def _UpperCAmelCase (self ) -> Any: '''simple docstring''' __lowercase = {'''in_channels''': 32, '''out_channels''': 32} __lowercase = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase (self ) -> Union[str, Any]: '''simple docstring''' __lowercase = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8] super().test_output(_lowerCamelCase )
217
'''simple docstring'''
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
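# Sanity check on the classic amicable pair (220, 284): each number's proper
# divisors sum to the other, so solution(285) counts both of them.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220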
217
1
import d4rl  # noqa: F401, registers the offline-RL gym environments
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
103
"""simple docstring""" def __a ( __lowerCamelCase = 3, __lowerCamelCase = 7, __lowerCamelCase = 100_0000 ): UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = 1 for current_denominator in range(1, limit + 1 ): UpperCAmelCase_ : Dict = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: UpperCAmelCase_ : List[Any] = current_numerator UpperCAmelCase_ : Optional[int] = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
61
0
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __lowercase ( a__ , a__ ) -> List[Any]: __SCREAMING_SNAKE_CASE = old_name if "patch_embed" in old_name: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = old_name.split('.' ) if layer == "0": __SCREAMING_SNAKE_CASE = old_name.replace('0' , 'convolution1' ) elif layer == "1": __SCREAMING_SNAKE_CASE = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": __SCREAMING_SNAKE_CASE = old_name.replace('3' , 'convolution2' ) else: __SCREAMING_SNAKE_CASE = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(R'\d\.\d' , a__ ): __SCREAMING_SNAKE_CASE = R'\b\d{2}\b' if bool(re.search(a__ , a__ ) ): __SCREAMING_SNAKE_CASE = re.search(R'\d\.\d\d.' , a__ ).group() else: __SCREAMING_SNAKE_CASE = re.search(R'\d\.\d.' , a__ ).group() if int(match[0] ) < 6: __SCREAMING_SNAKE_CASE = old_name.replace(a__ , '' ) __SCREAMING_SNAKE_CASE = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) __SCREAMING_SNAKE_CASE = 'intermediate_stages.' + trimmed_name else: __SCREAMING_SNAKE_CASE = old_name.replace(a__ , '' ) if int(match[2] ) < num_meta4D_last_stage: __SCREAMING_SNAKE_CASE = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: __SCREAMING_SNAKE_CASE = str(int(match[2] ) - num_meta4D_last_stage ) __SCREAMING_SNAKE_CASE = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: __SCREAMING_SNAKE_CASE = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: __SCREAMING_SNAKE_CASE = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: __SCREAMING_SNAKE_CASE = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: __SCREAMING_SNAKE_CASE = trimmed_name.replace('fc2' , 'linear_out' ) __SCREAMING_SNAKE_CASE = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(R'.\d.' , a__ ): __SCREAMING_SNAKE_CASE = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: __SCREAMING_SNAKE_CASE = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): __SCREAMING_SNAKE_CASE = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): __SCREAMING_SNAKE_CASE = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: __SCREAMING_SNAKE_CASE = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: __SCREAMING_SNAKE_CASE = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: __SCREAMING_SNAKE_CASE = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: __SCREAMING_SNAKE_CASE = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __SCREAMING_SNAKE_CASE = new_name.replace('norm' , 'layernorm' ) __SCREAMING_SNAKE_CASE = 'efficientformer.' + new_name else: __SCREAMING_SNAKE_CASE = 'efficientformer.encoder.' 
+ new_name return new_name def __lowercase ( a__ , a__ ) -> Optional[Any]: for key in checkpoint.copy().keys(): __SCREAMING_SNAKE_CASE = checkpoint.pop(a__ ) __SCREAMING_SNAKE_CASE = val return checkpoint def __lowercase ( ) -> Dict: __SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg' __SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ) return image def __lowercase ( a__ , a__ , a__ , a__ ) -> str: __SCREAMING_SNAKE_CASE = torch.load(a__ , map_location='cpu' )['model'] __SCREAMING_SNAKE_CASE = EfficientFormerConfig.from_json_file(a__ ) __SCREAMING_SNAKE_CASE = EfficientFormerForImageClassificationWithTeacher(a__ ) __SCREAMING_SNAKE_CASE = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) __SCREAMING_SNAKE_CASE = config.depths[-1] - config.num_metaad_blocks + 1 __SCREAMING_SNAKE_CASE = convert_torch_checkpoint(a__ , a__ ) model.load_state_dict(a__ ) model.eval() __SCREAMING_SNAKE_CASE = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = 2_56 __SCREAMING_SNAKE_CASE = 2_24 __SCREAMING_SNAKE_CASE = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) __SCREAMING_SNAKE_CASE = processor(images=a__ , return_tensors='pt' ).pixel_values # original processing pipeline __SCREAMING_SNAKE_CASE = Compose( [ Resize(a__ , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(a__ ), ToTensor(), Normalize(a__ , a__ ), ] ) __SCREAMING_SNAKE_CASE = image_transforms(a__ ).unsqueeze(0 ) assert torch.allclose(a__ , a__ ) __SCREAMING_SNAKE_CASE = model(a__ ) __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = (1, 10_00) if "l1" in model_name: __SCREAMING_SNAKE_CASE = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , a__ , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: __SCREAMING_SNAKE_CASE = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , a__ , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: __SCREAMING_SNAKE_CASE = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(a__ ).mkdir(exist_ok=a__ ) model.save_pretrained(a__ ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(a__ ) print(f"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print('Pushing model to the hub...' 
) model.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=a__ , ) processor.push_to_hub( repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=a__ , ) if __name__ == "__main__": lowerCAmelCase__ : str =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) lowerCAmelCase__ : int =parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
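# rename_key above is a long chain of string surgery; the underlying pattern,
# walk the checkpoint, rewrite each key, reinsert the tensor, fits in a few
# lines. A generic sketch (the rule list here is hypothetical, not the
# script's exact mapping):
import re


def remap_state_dict(state_dict: dict, rules: list) -> dict:
    """Apply (pattern, replacement) regex rules to every checkpoint key."""
    remapped = {}
    for key, value in state_dict.items():
        for pattern, repl in rules:
            key = re.sub(pattern, repl, key)
        remapped[key] = value
    return remapped


assert remap_state_dict(
    {"patch_embed.0.weight": 1}, [(r"patch_embed\.0", "patch_embed.convolution1")]
) == {"patch_embed.convolution1.weight": 1}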
118
def partition(m: int) -> int:
    # memo[n][k] counts partitions of n into parts of size at most k + 1
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
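# Quick check: p(5) = 7 (5; 4+1; 3+2; 3+1+1; 2+2+1; 2+1+1+1; 1+1+1+1+1).
assert partition(5) == 7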
118
1
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCAmelCase : List[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" for attribute in key.split("." ): a__ : Optional[Any] =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: a__ : Optional[Any] =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: a__ : Optional[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": a__ : Optional[int] =value elif weight_type == "weight_g": a__ : List[str] =value elif weight_type == "weight_v": a__ : Any =value elif weight_type == "bias": a__ : Optional[int] =value else: a__ : int =value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ): """simple docstring""" a__ : int =[] a__ : Optional[Any] =fairseq_model.state_dict() a__ : Any =hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a__ : List[str] =False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , ) a__ : Any =True else: for key, mapped_key in MAPPING.items(): a__ : str ="unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." 
)[:-1] ) != key): # special case since naming is very similar continue a__ : Tuple =True if "*" in mapped_key: a__ : Union[str, Any] =name.split(SCREAMING_SNAKE_CASE )[0].split("." )[-2] a__ : Any =mapped_key.replace("*" , SCREAMING_SNAKE_CASE ) if "weight_g" in name: a__ : Any ="weight_g" elif "weight_v" in name: a__ : Union[str, Any] ="weight_v" elif "bias" in name: a__ : Tuple ="bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj a__ : int ="weight" else: a__ : Optional[Any] =None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" a__ : Optional[Any] =full_name.split("conv_layers." )[-1] a__ : Dict =name.split("." ) a__ : str =int(items[0] ) a__ : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a__ : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a__ : str =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) a__ : int =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) a__ : List[str] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Any=True ): """simple docstring""" if config_path is not None: a__ : Optional[int] =UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: a__ : Optional[Any] =UniSpeechSatConfig() a__ : List[Any] ="" if is_finetuned: a__ : Optional[int] =UniSpeechSatForCTC(SCREAMING_SNAKE_CASE ) else: a__ : int =UniSpeechSatForPreTraining(SCREAMING_SNAKE_CASE ) a__ , a__ , a__ : str =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) a__ : Optional[Any] =model[0].eval() 
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCAmelCase : Union[str, Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
95
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL

is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
7
0
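The VAE-decoder export script in the row above is normally driven through its argparse block; a minimal programmatic sketch, assuming the restored `convert_models` name and purely hypothetical local paths:

    # "./stable-diffusion" and "./sd-onnx" are placeholder paths, not real checkpoints
    convert_models(model_path="./stable-diffusion", output_path="./sd-onnx", opset=14, fp16=False)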
"""simple docstring""" import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput a : int = '''scheduler_config.json''' class __UpperCamelCase ( __lowercase ): lowerCamelCase : List[str] =1 lowerCamelCase : Optional[int] =2 lowerCamelCase : Dict =3 lowerCamelCase : int =4 lowerCamelCase : Optional[int] =5 @dataclass class __UpperCamelCase ( __lowercase ): lowerCamelCase : jnp.ndarray class __UpperCamelCase : lowerCamelCase : int =SCHEDULER_CONFIG_NAME lowerCamelCase : int =["dtype"] lowerCamelCase : Optional[int] =[] lowerCamelCase : Optional[int] =True @classmethod def __a ( cls , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[int]: a : Tuple = cls.load_config( pretrained_model_name_or_path=snake_case_ , subfolder=snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ , ) a : Dict = cls.from_config(snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ ) if hasattr(snake_case_ , "create_state" ) and getattr(snake_case_ , "has_state" , snake_case_ ): a : str = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , **lowerCAmelCase__ ) -> Any: self.save_config(save_directory=snake_case_ , push_to_hub=snake_case_ , **snake_case_ ) @property def __a ( self ) -> Any: return self._get_compatibles() @classmethod def __a ( cls ) -> int: a : Tuple = list(set([cls.__name__] + cls._compatibles ) ) a : Union[str, Any] = importlib.import_module(__name__.split("." )[0] ) a : List[Any] = [ getattr(snake_case_ , snake_case_ ) for c in compatible_classes_str if hasattr(snake_case_ , snake_case_ ) ] return compatible_classes def _SCREAMING_SNAKE_CASE ( _lowercase : jnp.ndarray , _lowercase : Tuple[int] ) ->Tuple: '''simple docstring''' assert len(lowerCAmelCase__ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCAmelCase__ ) - x.ndim) ) , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int=0.999 , _lowercase : Any=jnp.floataa ) ->List[Any]: '''simple docstring''' def alpha_bar(_lowercase : int ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 a : Optional[Any] = [] for i in range(lowerCAmelCase__ ): a : List[str] = i / num_diffusion_timesteps a : List[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(lowerCAmelCase__ ) / alpha_bar(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) return jnp.array(lowerCAmelCase__ , dtype=lowerCAmelCase__ ) @flax.struct.dataclass class __UpperCamelCase : lowerCamelCase : jnp.ndarray lowerCamelCase : jnp.ndarray lowerCamelCase : jnp.ndarray @classmethod def __a ( cls , lowerCAmelCase__ ) -> Optional[int]: a : int = scheduler.config if config.trained_betas is not None: a : Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": a : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
a : Union[str, Any] = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule a : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" ) a : List[str] = 1.0 - betas a : Any = jnp.cumprod(snake_case_ , axis=0 ) return cls( alphas=snake_case_ , betas=snake_case_ , alphas_cumprod=snake_case_ , ) def _SCREAMING_SNAKE_CASE ( _lowercase : CommonSchedulerState , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray ) ->List[str]: '''simple docstring''' a : Optional[Any] = state.alphas_cumprod a : str = alphas_cumprod[timesteps] ** 0.5 a : List[Any] = sqrt_alpha_prod.flatten() a : Tuple = broadcast_to_shape_from_left(lowerCAmelCase__ , original_samples.shape ) a : Any = (1 - alphas_cumprod[timesteps]) ** 0.5 a : List[Any] = sqrt_one_minus_alpha_prod.flatten() a : Any = broadcast_to_shape_from_left(lowerCAmelCase__ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def _SCREAMING_SNAKE_CASE ( _lowercase : CommonSchedulerState , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray ) ->Optional[int]: '''simple docstring''' a : str = get_sqrt_alpha_prod(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) a : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def _SCREAMING_SNAKE_CASE ( _lowercase : CommonSchedulerState , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray , _lowercase : jnp.ndarray ) ->Optional[int]: '''simple docstring''' a : Union[str, Any] = get_sqrt_alpha_prod(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) a : List[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
351
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( '''pipelines_utils''', '''0.22.0''', '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''', standard_warn=False, stacklevel=3, )
79
0
'''simple docstring''' import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE: """simple docstring""" @staticmethod def A ( *__snake_case : Optional[int] , **__snake_case : List[Any] ) -> Dict: pass def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> str: UpperCAmelCase : Optional[Any] = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def snake_case_ ( _lowerCAmelCase : Tuple ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = np.array(__lowerCAmelCase ) UpperCAmelCase : Tuple = npimg.shape return {"hash": hashimage(__lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class SCREAMING_SNAKE_CASE( unittest.TestCase ): """simple docstring""" lowerCamelCase__ = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) lowerCamelCase__ = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def A ( self : Optional[int] , __snake_case : Dict , __snake_case : Any , __snake_case : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase : List[Any] = MaskGenerationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def A ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Any ) -> List[Any]: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def A ( self : Any ) -> Any: pass @slow @require_torch def A ( self : str ) -> List[str]: UpperCAmelCase : str = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) UpperCAmelCase : List[str] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase : List[str] = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(lowerCamelCase__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.04_44}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_21}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.01_67}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.01_32}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.00_53}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.99_67}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_93}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.99_09}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.98_79}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.98_34}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.97_16}, 
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.96_12}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.95_99}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.95_52}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.95_32}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.95_16}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.94_99}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.94_83}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.94_64}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_43}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_43}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.94_08}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.93_35}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.93_26}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.92_62}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.89_99}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.89_86}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.89_84}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.88_73}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.88_71} ] , ) # fmt: on @require_torch @slow def A ( self : Any ) -> List[Any]: UpperCAmelCase : Union[str, Any] = "facebook/sam-vit-huge" UpperCAmelCase : int = pipeline('''mask-generation''' , model=lowerCamelCase__ ) UpperCAmelCase : Optional[Any] = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing UpperCAmelCase : Tuple = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(lowerCamelCase__ , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.04_44}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.02_10}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.01_67}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.01_32}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.00_53}, ] , )
23
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : List[str] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = emb.weight.shape _UpperCAmelCase : str = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = emb.weight.data return lin_layer def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=None ): _UpperCAmelCase : int = {} for old_key in state_dict.keys(): _UpperCAmelCase : Tuple = old_key if "moe_layer.experts." in key: if expert_idx is not None: _UpperCAmelCase : Optional[int] = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" ) else: _UpperCAmelCase : Any = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: _UpperCAmelCase : List[Any] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: _UpperCAmelCase : Tuple = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: _UpperCAmelCase : List[Any] = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: _UpperCAmelCase : List[Any] = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: _UpperCAmelCase : Any = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: _UpperCAmelCase : int = key.replace("final_layer_norm" , "ff_layer_norm" ) _UpperCAmelCase : Tuple = state_dict[old_key] return new_dict def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = WEIGHTS_NAME ): _UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : Optional[Any] = 0 os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) for expert in range(__lowerCAmelCase ): _UpperCAmelCase : Tuple = switch_checkpoint_path + F"""-rank-{expert}.pt""" if os.path.isfile(__lowerCAmelCase ): _UpperCAmelCase : Tuple = torch.load(__lowerCAmelCase )["model"] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : Dict = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : List[str] = os.path.join( __lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__lowerCAmelCase )[0]].dtype ) # Add the last block _UpperCAmelCase : Tuple = os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) ) _UpperCAmelCase : Union[str, Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(__lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Any = shared_weights["decoder.embed_tokens.weight"] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__lowerCAmelCase ) == 1: _UpperCAmelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__lowerCAmelCase , __lowerCAmelCase ) # Otherwise, let's build the index _UpperCAmelCase : Union[str, Any] = {} for idx, shard in enumerate(__lowerCAmelCase ): _UpperCAmelCase : Tuple = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin""" ) _UpperCAmelCase : List[Any] = os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) for key in shard: _UpperCAmelCase : List[Any] = shard_file # Add the metadata _UpperCAmelCase : Any = {"total_size": total_size} _UpperCAmelCase : List[str] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , "w" , encoding="utf-8" ) as f: _UpperCAmelCase : Tuple = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n" f.write(__lowerCAmelCase ) return metadata, index if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--nllb_moe_checkpoint_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b', type=str, required=False, help='Path to the output pytorch model.', ) lowerCamelCase__ = parser.parse_args() lowerCamelCase__ ,lowerCamelCase__ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) lowerCamelCase__ = NllbMoeConfig.from_pretrained( 'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) lowerCamelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('Done') model.save_pretrained(args.pytorch_dump_folder_path)
234
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
358
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    # sieve of Eratosthenes over [0, max_number)
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    # Counts numbers below max_number that are the product of exactly two
    # primes, using a two-pointer sweep over the sorted primes.
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
232
0
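The two-pointer sweep in the semiprime counter above relies on the primes being sorted ascending: once prime_numbers[left] * prime_numbers[right] drops below max_number, every pair with the same left index and a smaller right index also qualifies, so right - left + 1 pairs can be counted at once. A hand-checked example, assuming the restored names:

    # semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -> ten of them
    assert solution(30) == 10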
"""simple docstring""" def a__ ( __SCREAMING_SNAKE_CASE = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int: try: __lowerCAmelCase: int = int(__SCREAMING_SNAKE_CASE ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) __lowerCAmelCase: List[str] = 1 __lowerCAmelCase: List[str] = 2 while i * i <= n: while n % i == 0: __lowerCAmelCase: Tuple = i n //= i i += 1 if n > 1: __lowerCAmelCase: str = n return int(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(F'''{solution() = }''')
217
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class snake_case : def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : str=1_3 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : List[str]=3_7 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[int]=None , )-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Tuple = parent __lowerCAmelCase: Optional[int] = batch_size __lowerCAmelCase: int = seq_length __lowerCAmelCase: Any = is_training __lowerCAmelCase: List[Any] = use_input_mask __lowerCAmelCase: Any = use_token_type_ids __lowerCAmelCase: Dict = use_labels __lowerCAmelCase: Union[str, Any] = vocab_size __lowerCAmelCase: Union[str, Any] = hidden_size __lowerCAmelCase: int = num_hidden_layers __lowerCAmelCase: List[Any] = num_attention_heads __lowerCAmelCase: int = intermediate_size __lowerCAmelCase: Optional[Any] = hidden_act __lowerCAmelCase: Optional[Any] = hidden_dropout_prob __lowerCAmelCase: Optional[int] = attention_probs_dropout_prob __lowerCAmelCase: Any = max_position_embeddings __lowerCAmelCase: Optional[int] = type_vocab_size __lowerCAmelCase: str = type_sequence_label_size __lowerCAmelCase: int = initializer_range __lowerCAmelCase: Dict = num_labels __lowerCAmelCase: Dict = num_choices __lowerCAmelCase: str = scope def lowercase_ ( self : Optional[Any])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __lowerCAmelCase: List[Any] = None if self.use_input_mask: __lowerCAmelCase: int = random_attention_mask([self.batch_size, self.seq_length]) __lowerCAmelCase: Dict = None if self.use_token_type_ids: __lowerCAmelCase: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __lowerCAmelCase: str = None __lowerCAmelCase: Any = None __lowerCAmelCase: Dict = None if self.use_labels: __lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) __lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , self.num_choices) __lowerCAmelCase: Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : str)-> Dict: '''simple docstring''' return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , ) def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any])-> str: '''simple docstring''' __lowerCAmelCase: List[Any] = OpenLlamaModel(config=UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__) __lowerCAmelCase: int = model(UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , )-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = True __lowerCAmelCase: Any = OpenLlamaModel(UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: List[Any] = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) __lowerCAmelCase: Optional[int] = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) __lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowercase_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , )-> Optional[int]: '''simple docstring''' __lowerCAmelCase: str = OpenLlamaForCausalLM(config=UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowercase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , )-> Tuple: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = True __lowerCAmelCase: Dict = True __lowerCAmelCase: Union[str, Any] = OpenLlamaForCausalLM(config=UpperCamelCase__) model.to(UpperCamelCase__) model.eval() # first forward pass __lowerCAmelCase: Optional[int] = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , ) __lowerCAmelCase: 
Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size) __lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __lowerCAmelCase: str = torch.cat([input_ids, next_tokens] , dim=-1) __lowerCAmelCase: List[str] = torch.cat([input_mask, next_mask] , dim=-1) __lowerCAmelCase: Union[str, Any] = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0] __lowerCAmelCase: List[str] = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0] # select random slice __lowerCAmelCase: List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item() __lowerCAmelCase: List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() __lowerCAmelCase: Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3)) def lowercase_ ( self : Tuple)-> str: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ): List[Any] = config_and_inputs __lowerCAmelCase: Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case ( __snake_case, __snake_case, __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Any = (OpenLlamaForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : Optional[int] = ( { """feature-extraction""": OpenLlamaModel, """text-classification""": OpenLlamaForSequenceClassification, """text-generation""": OpenLlamaForCausalLM, """zero-shot""": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : List[str] = False SCREAMING_SNAKE_CASE_ : List[Any] = False def lowercase_ ( self : Dict)-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: int = OpenLlamaModelTester(self) __lowerCAmelCase: Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7) def lowercase_ ( self : List[str])-> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any])-> List[Any]: '''simple docstring''' __lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__) def lowercase_ ( self : int)-> str: '''simple docstring''' __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase: Union[str, Any] = type self.model_tester.create_and_check_model(*UpperCamelCase__) def lowercase_ ( self : Tuple)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: Dict = 3 __lowerCAmelCase: Optional[Any] = input_dict["input_ids"] __lowerCAmelCase: Optional[Any] = input_ids.ne(1).to(UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowercase_ ( self : Dict)-> Tuple: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: int = 3 __lowerCAmelCase: Dict = "single_label_classification" __lowerCAmelCase: str = input_dict["input_ids"] __lowerCAmelCase: Tuple = input_ids.ne(1).to(UpperCamelCase__) __lowerCAmelCase: Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __lowerCAmelCase: List[Any] = OpenLlamaForSequenceClassification(UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def lowercase_ ( self : Optional[int])-> Any: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: Tuple = 3 __lowerCAmelCase: Any = "multi_label_classification" __lowerCAmelCase: str = input_dict["input_ids"] __lowerCAmelCase: Optional[int] = input_ids.ne(1).to(UpperCamelCase__) __lowerCAmelCase: Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) __lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCamelCase__) model.to(UpperCamelCase__) model.eval() __lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test") def lowercase_ ( self : Any)-> Tuple: '''simple docstring''' pass @parameterized.expand([("linear",), ("dynamic",)]) def lowercase_ ( self : Any , UpperCamelCase__ : List[str])-> Dict: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase: Any = ids_tensor([1, 1_0] , config.vocab_size) __lowerCAmelCase: Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __lowerCAmelCase: List[Any] = OpenLlamaModel(UpperCamelCase__) original_model.to(UpperCamelCase__) original_model.eval() __lowerCAmelCase: int = original_model(UpperCamelCase__).last_hidden_state __lowerCAmelCase: str = original_model(UpperCamelCase__).last_hidden_state set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __lowerCAmelCase: Dict = {"type": scaling_type, "factor": 10.0} __lowerCAmelCase: List[str] = OpenLlamaModel(UpperCamelCase__) scaled_model.to(UpperCamelCase__) 
scaled_model.eval() __lowerCAmelCase: Dict = scaled_model(UpperCamelCase__).last_hidden_state __lowerCAmelCase: Any = scaled_model(UpperCamelCase__).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5)) else: self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
217
1
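The trial-division routine at the start of this row divides out each prime factor completely before advancing, so the last factor recorded is the largest; two hand-checked values for the restored `solution`:

    assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
    assert solution(2048) == 2    # 2048 = 2**11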
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __a : """simple docstring""" @staticmethod def _lowerCAmelCase ( *lowercase_ : Optional[int] , **lowercase_ : Tuple ): pass @is_pipeline_test @require_torch @require_vision class __a ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _lowerCAmelCase ( self : int , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] ): UpperCamelCase__ : Tuple =pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) UpperCamelCase__ : Optional[Any] =[ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def _lowerCAmelCase ( self : int , lowercase_ : Union[str, Any] , lowercase_ : int ): UpperCamelCase__ : int =vqa_pipeline(lowercase_ , top_k=1 ) self.assertEqual( lowercase_ , [ [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}], [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}], ] , ) @require_torch def _lowerCAmelCase ( self : List[Any] ): UpperCamelCase__ : int =pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) UpperCamelCase__ : Union[str, Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCamelCase__ : Dict ='''How many cats are there?''' UpperCamelCase__ : Dict =vqa_pipeline(image=lowercase_ , question='''How many cats are there?''' , top_k=2 ) self.assertEqual( lowercase_ , [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}, {'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}] ) UpperCamelCase__ : Optional[int] =vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( lowercase_ , [{'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}, {'''score''': ANY(lowercase_ ), '''answer''': ANY(lowercase_ )}] ) @slow @require_torch def _lowerCAmelCase ( self : Tuple ): UpperCamelCase__ : Optional[Any] =pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' ) UpperCamelCase__ : str ='''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCamelCase__ : str ='''How many cats are there?''' UpperCamelCase__ : str =vqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] ) UpperCamelCase__ : Optional[Any] =vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] ) UpperCamelCase__ : List[str] =vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [[{'''score''': 0.8_7_9_9, '''answer''': 
'''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf @unittest.skip('''Visual question answering not implemented in TF''' ) def _lowerCAmelCase ( self : Optional[Any] ): pass
157
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float ): '''simple docstring''' if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ): '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ): '''simple docstring''' if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( UpperCAmelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
157
1
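The interest helpers restored in this row's second cell reduce APR compounding to the generic compound-interest formula; two hand-checked values (using isclose to sidestep float rounding):

    from math import isclose

    assert isclose(simple_interest(18000.0, 0.06, 3), 3240.0)     # principal * rate * days
    assert isclose(compound_interest(10000.0, 0.05, 3), 1576.25)  # 10000 * ((1 + 0.05)**3 - 1)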
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __A ( self : List[Any] ) -> Tuple: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def __A ( self : Optional[int] ) -> Union[str, Any]: with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def __A ( self : int ) -> Any: with self.assertRaises(__magic_name__ ): SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) ) def __A ( self : str ) -> Any: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __A ( self : int ) -> str: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) ) def __A ( self : List[str] ) -> List[Any]: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __A ( self : Any ) -> str: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) ) self.assertEqual(arr.type , pa.string() ) def __A ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def __A ( self : Any ) -> Union[str, Any]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) ) def __A ( self : str ) -> List[str]: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def __A ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def __A ( self : int ) -> Any: import PIL.Image SCREAMING_SNAKE_CASE_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( "datasets.arrow_writer.cast_to_python_objects" , side_effect=__magic_name__ ) as mock_cast_to_python_objects: SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting" , __magic_name__ ) self.assertFalse(kwargs["optimize_list_casting"] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferReader(__UpperCamelCase ) if isinstance(__UpperCamelCase , pa.Buffer ) else pa.memory_map(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = pa.ipc.open_stream(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert 
pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_ = pa.schema(__UpperCamelCase ) if fields else None with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def a__ ( ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} ) with ArrowWriter(stream=__UpperCamelCase , features=__UpperCamelCase ) as writer: writer.write({"labels": 0} ) writer.write({"labels": 1} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata SCREAMING_SNAKE_CASE_ = pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE_ = pa.ipc.open_stream(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = f.read_all() SCREAMING_SNAKE_CASE_ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__UpperCamelCase ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] ) def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() with ArrowWriter( stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="split_name" , check_duplicates=__UpperCamelCase , ) as writer: with pytest.raises(__UpperCamelCase ): writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] ) def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() with ArrowWriter( stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="split_name" , check_duplicates=__UpperCamelCase , ) as writer: with pytest.raises(__UpperCamelCase ): writer.write({"col_1": "foo", "col_2": 1} , key=1_0 ) writer.write({"col_1": "bar", "col_2": 2} , key=1_0 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 1_0] ) def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() with ArrowWriter( stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt="split_name" , check_duplicates=__UpperCamelCase , ) as writer: writer.write({"col_1": "foo", "col_2": 1} , key=1 ) writer.write({"col_1": "bar", "col_2": 2} , key=2 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) 
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_ = pa.schema(__UpperCamelCase ) if fields else None with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) writer.write_batch({"col_1": [], "col_2": []} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_ = pa.schema(__UpperCamelCase ) if fields else None with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 1_0] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_ = pa.schema(__UpperCamelCase ) if fields else None with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) ) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def a__ ( ): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = {"col_1": pa.string(), "col_2": pa.intaa()} SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , "test.arrow" ) with ArrowWriter(path=__UpperCamelCase , schema=pa.schema(__UpperCamelCase ) ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata ) _check_output(__UpperCamelCase , 
1 ) def a__ ( __UpperCamelCase ): if pa.types.is_list(__UpperCamelCase ): return get_base_dtype(arr_type.value_type ) else: return arr_type def a__ ( __UpperCamelCase , __UpperCamelCase ): if isinstance(lst[0] , __UpperCamelCase ): change_first_primitive_element_in_list(lst[0] , __UpperCamelCase ) else: SCREAMING_SNAKE_CASE_ = value @pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = pa.array(TypedSequence(__UpperCamelCase , optimized_int_type=__UpperCamelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype" , [ ("attention_mask", pa.inta()), ("special_tokens_mask", pa.inta()), ("token_type_ids", pa.inta()), ("input_ids", pa.intaa()), ("other", pa.intaa()), ] , ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # in range SCREAMING_SNAKE_CASE_ = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications SCREAMING_SNAKE_CASE_ = copy.deepcopy(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("raise_exception" , [False, True] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = str(tmp_path / "dataset-train.arrow" ) try: with ArrowWriter(path=__UpperCamelCase ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = "mock://dataset-train.arrow" with ArrowWriter(path=__UpperCamelCase , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__UpperCamelCase ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__UpperCamelCase ) def a__ ( ): SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() with ParquetWriter(stream=__UpperCamelCase ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 SCREAMING_SNAKE_CASE_ = pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE_ = pq.read_table(__UpperCamelCase ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files" , [False, True] ) def a__ ( __UpperCamelCase , __UpperCamelCase ): import PIL.Image SCREAMING_SNAKE_CASE_ = str(tmp_path / "test_image_rgb.jpg" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__UpperCamelCase , format="png" ) SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() with ParquetWriter( stream=__UpperCamelCase , features=Features({"image": 
Image()} ) , embed_local_files=__UpperCamelCase ) as writer: writer.write({"image": image_path} ) writer.finalize() SCREAMING_SNAKE_CASE_ = pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE_ = pq.read_table(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"] , __UpperCamelCase ) with open(__UpperCamelCase , "rb" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def a__ ( ): SCREAMING_SNAKE_CASE_ = pa.schema([pa.field("col_1" , pa.string() , nullable=__UpperCamelCase )] ) SCREAMING_SNAKE_CASE_ = pa.BufferOutputStream() with ArrowWriter(stream=__UpperCamelCase ) as writer: writer._build_writer(inferred_schema=__UpperCamelCase ) assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
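The stream-based tests above all reduce to the same round trip; here is a minimal standalone sketch of it (column names are arbitrary, and it assumes the `datasets` and `pyarrow` versions this suite targets):

import pyarrow as pa

from datasets.arrow_writer import ArrowWriter

# Write two examples into an in-memory Arrow IPC stream, then read them back.
stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

reader = pa.ipc.open_stream(pa.BufferReader(stream.getvalue()))
table = reader.read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}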
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = DebertaTokenizer lowerCamelCase__ = True lowerCamelCase__ = DebertaTokenizerFast def __A ( self : List[Any] ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] SCREAMING_SNAKE_CASE_ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) SCREAMING_SNAKE_CASE_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE_ = {"unk_token": "[UNK]"} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__magic_name__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__magic_name__ ) ) def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) def __A ( self : str , __magic_name__ : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = "lower newer" return input_text, output_text def __A ( self : Union[str, Any] ) -> str: SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = "lower newer" SCREAMING_SNAKE_CASE_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def __A ( self : Optional[int] ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = self.get_tokenizer() SCREAMING_SNAKE_CASE_ = tokenizer("Hello" , "World" ) SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , __magic_name__ ) @slow def __A ( self : Any ) -> Any: SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) SCREAMING_SNAKE_CASE_ = tokenizer.encode("sequence builders" , add_special_tokens=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.encode( "sequence builders" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ ) SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def __A ( self : Tuple ) -> str: SCREAMING_SNAKE_CASE_ = 
[self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained("microsoft/deberta-base" ) SCREAMING_SNAKE_CASE_ = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , padding=__magic_name__ ) SCREAMING_SNAKE_CASE_ = [tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) for seq in encoding["input_ids"]] # fmt: off SCREAMING_SNAKE_CASE_ = { "input_ids": [ [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on SCREAMING_SNAKE_CASE_ = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , __magic_name__ ) for expected, decoded in zip(__magic_name__ , __magic_name__ ): self.assertEqual(__magic_name__ , __magic_name__ )
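For reference, a minimal sketch of what `setUp` wires together: the toy byte-level BPE vocab and merges are written to disk and loaded into a `DebertaTokenizer`. Constructing the tokenizer directly from the two files is an assumption on my part; the suite itself goes through `from_pretrained`.

import json
import os
import tempfile

from transformers import DebertaTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
         "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
         "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.json")
    merges_file = os.path.join(tmpdir, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump({tok: i for i, tok in enumerate(vocab)}, fp)
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))

    tokenizer = DebertaTokenizer(vocab_file, merges_file, unk_token="[UNK]")
    print(tokenizer.tokenize("lower newer"))
    # per the test above: ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']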
"""simple docstring""" class __a : """simple docstring""" def __init__( self : List[str] ): UpperCamelCase__ : dict[str, TrieNode] ={} # Mapping from char to TrieNode UpperCamelCase__ : Union[str, Any] =False def _lowerCAmelCase ( self : Tuple , lowercase_ : list[str] ): for word in words: self.insert(lowercase_ ) def _lowerCAmelCase ( self : List[str] , lowercase_ : str ): UpperCamelCase__ : int =self for char in word: if char not in curr.nodes: UpperCamelCase__ : Union[str, Any] =TrieNode() UpperCamelCase__ : List[str] =curr.nodes[char] UpperCamelCase__ : Optional[int] =True def _lowerCAmelCase ( self : Optional[Any] , lowercase_ : str ): UpperCamelCase__ : List[str] =self for char in word: if char not in curr.nodes: return False UpperCamelCase__ : Union[str, Any] =curr.nodes[char] return curr.is_leaf def _lowerCAmelCase ( self : List[str] , lowercase_ : str ): def _delete(lowercase_ : TrieNode , lowercase_ : str , lowercase_ : int ) -> bool: if index == len(lowercase_ ): # If word does not exist if not curr.is_leaf: return False UpperCamelCase__ : Optional[Any] =False return len(curr.nodes ) == 0 UpperCamelCase__ : int =word[index] UpperCamelCase__ : Tuple =curr.nodes.get(lowercase_ ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCamelCase__ : Dict =_delete(lowercase_ , lowercase_ , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowercase_ , 0 ) def _lowerCAmelCase ( UpperCAmelCase : TrieNode , UpperCAmelCase : str ): '''simple docstring''' if node.is_leaf: print(UpperCAmelCase , end=''' ''' ) for key, value in node.nodes.items(): print_words(UpperCAmelCase , word + key ) def _lowerCAmelCase ( ): '''simple docstring''' UpperCamelCase__ : Dict ='''banana bananas bandana band apple all beast'''.split() UpperCamelCase__ : Tuple =TrieNode() root.insert_many(UpperCAmelCase ) # print_words(root, "") assert all(root.find(UpperCAmelCase ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def _lowerCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : bool ): '''simple docstring''' print(str(UpperCAmelCase ) , '''works!''' if passes else '''doesn\'t work :(''' ) def _lowerCAmelCase ( ): '''simple docstring''' assert test_trie() def _lowerCAmelCase ( ): '''simple docstring''' print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # General docstring _SCREAMING_SNAKE_CASE : Union[str, Any] = """ResNetConfig""" # Base docstring _SCREAMING_SNAKE_CASE : str = """microsoft/resnet-50""" _SCREAMING_SNAKE_CASE : List[Any] = [1, 2_0_4_8, 7, 7] # Image classification docstring _SCREAMING_SNAKE_CASE : Tuple = """microsoft/resnet-50""" _SCREAMING_SNAKE_CASE : Union[str, Any] = """tiger cat""" _SCREAMING_SNAKE_CASE : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class __a ( nn.Module ): """simple docstring""" def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : str = "relu" ): super().__init__() UpperCamelCase__ : Optional[Any] =nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , bias=lowercase_ ) UpperCamelCase__ : Tuple =nn.BatchNormad(lowercase_ ) UpperCamelCase__ : int =ACTaFN[activation] if activation is not None else nn.Identity() def _lowerCAmelCase ( self : Dict , lowercase_ : Tensor ): UpperCamelCase__ : List[Any] =self.convolution(lowercase_ ) UpperCamelCase__ : Union[str, Any] =self.normalization(lowercase_ ) UpperCamelCase__ : Optional[int] =self.activation(lowercase_ ) return hidden_state class __a ( nn.Module ): """simple docstring""" def __init__( self : Tuple , lowercase_ : ResNetConfig ): super().__init__() UpperCamelCase__ : Any =ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) UpperCamelCase__ : Tuple =nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) UpperCamelCase__ : Any =config.num_channels def _lowerCAmelCase ( self : str , lowercase_ : Tensor ): UpperCamelCase__ : Optional[Any] =pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) UpperCamelCase__ : Dict =self.embedder(lowercase_ ) UpperCamelCase__ : Union[str, Any] =self.pooler(lowercase_ ) return embedding class __a ( nn.Module ): """simple docstring""" def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ): super().__init__() UpperCamelCase__ : int =nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ ) UpperCamelCase__ : Optional[int] =nn.BatchNormad(lowercase_ ) def _lowerCAmelCase ( self : Tuple , lowercase_ : Tensor ): UpperCamelCase__ : Dict =self.convolution(lowercase_ ) UpperCamelCase__ : Dict =self.normalization(lowercase_ ) return hidden_state class __a ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 , lowercase_ : str = "relu" ): super().__init__() UpperCamelCase__ 
: Optional[Any] =in_channels != out_channels or stride != 1 UpperCamelCase__ : str =( ResNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) UpperCamelCase__ : List[str] =nn.Sequential( ResNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ ) , ResNetConvLayer(lowercase_ , lowercase_ , activation=lowercase_ ) , ) UpperCamelCase__ : Any =ACTaFN[activation] def _lowerCAmelCase ( self : str , lowercase_ : Tuple ): UpperCamelCase__ : Any =hidden_state UpperCamelCase__ : Union[str, Any] =self.layer(lowercase_ ) UpperCamelCase__ : str =self.shortcut(lowercase_ ) hidden_state += residual UpperCamelCase__ : str =self.activation(lowercase_ ) return hidden_state class __a ( nn.Module ): """simple docstring""" def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 , lowercase_ : str = "relu" , lowercase_ : int = 4 ): super().__init__() UpperCamelCase__ : Optional[Any] =in_channels != out_channels or stride != 1 UpperCamelCase__ : Union[str, Any] =out_channels // reduction UpperCamelCase__ : str =( ResNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) UpperCamelCase__ : int =nn.Sequential( ResNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 ) , ResNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ ) , ResNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) UpperCamelCase__ : List[Any] =ACTaFN[activation] def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ): UpperCamelCase__ : Dict =hidden_state UpperCamelCase__ : str =self.layer(lowercase_ ) UpperCamelCase__ : Tuple =self.shortcut(lowercase_ ) hidden_state += residual UpperCamelCase__ : Optional[int] =self.activation(lowercase_ ) return hidden_state class __a ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowercase_ : ResNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ): super().__init__() UpperCamelCase__ : Dict =ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer UpperCamelCase__ : Union[str, Any] =nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowercase_ , lowercase_ , stride=lowercase_ , activation=config.hidden_act ) , *[layer(lowercase_ , lowercase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def _lowerCAmelCase ( self : Tuple , lowercase_ : Tensor ): UpperCamelCase__ : Optional[Any] =input for layer in self.layers: UpperCamelCase__ : Tuple =layer(lowercase_ ) return hidden_state class __a ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowercase_ : ResNetConfig ): super().__init__() UpperCamelCase__ : Optional[Any] =nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCamelCase__ : int =zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) ) def _lowerCAmelCase ( self : Dict , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ): UpperCamelCase__ : int =() if output_hidden_states else None for stage_module in self.stages: if 
output_hidden_states: UpperCamelCase__ : Union[str, Any] =hidden_states + (hidden_state,) UpperCamelCase__ : List[str] =stage_module(lowercase_ ) if output_hidden_states: UpperCamelCase__ : Optional[Any] =hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowercase_ , hidden_states=lowercase_ , ) class __a ( snake_case__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ResNetConfig SCREAMING_SNAKE_CASE_ = 'resnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' SCREAMING_SNAKE_CASE_ = True def _lowerCAmelCase ( self : str , lowercase_ : Optional[int] ): if isinstance(lowercase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def _lowerCAmelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Dict=False ): if isinstance(lowercase_ , lowercase_ ): UpperCamelCase__ : str =value _SCREAMING_SNAKE_CASE : int = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _SCREAMING_SNAKE_CASE : Optional[int] = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.', snake_case__, ) class __a ( snake_case__ ): """simple docstring""" def __init__( self : Union[str, Any] , lowercase_ : List[Any] ): super().__init__(lowercase_ ) UpperCamelCase__ : Dict =config UpperCamelCase__ : str =ResNetEmbeddings(lowercase_ ) UpperCamelCase__ : str =ResNetEncoder(lowercase_ ) UpperCamelCase__ : Union[str, Any] =nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowerCAmelCase ( self : List[Any] , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ): UpperCamelCase__ : Union[str, Any] =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : Optional[Any] =self.embedder(lowercase_ ) UpperCamelCase__ : Union[str, Any] =self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) UpperCamelCase__ : int =encoder_outputs[0] UpperCamelCase__ : List[Any] =self.pooler(lowercase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ', snake_case__, ) class __a ( snake_case__ ): """simple docstring""" def __init__( self : Dict , lowercase_ : Union[str, Any] ): super().__init__(lowercase_ ) UpperCamelCase__ : Any =config.num_labels UpperCamelCase__ : Dict =ResNetModel(lowercase_ ) # classification head UpperCamelCase__ : Any =nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowerCAmelCase ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ): UpperCamelCase__ : Dict =return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : List[Any] =self.resnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) UpperCamelCase__ : Tuple =outputs.pooler_output if return_dict else outputs[1] UpperCamelCase__ : Union[str, Any] =self.classifier(lowercase_ ) UpperCamelCase__ : int =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCamelCase__ : List[str] ='''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCamelCase__ : Dict ='''single_label_classification''' else: UpperCamelCase__ : str ='''multi_label_classification''' if self.config.problem_type == "regression": UpperCamelCase__ : Union[str, Any] =MSELoss() if self.num_labels == 1: UpperCamelCase__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCamelCase__ : Dict =loss_fct(lowercase_ , lowercase_ ) elif self.config.problem_type == "single_label_classification": UpperCamelCase__ : List[Any] =CrossEntropyLoss() UpperCamelCase__ : List[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCamelCase__ : Optional[Any] =BCEWithLogitsLoss() UpperCamelCase__ : List[str] =loss_fct(lowercase_ , lowercase_ ) if not return_dict: UpperCamelCase__ : Tuple =(logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ', snake_case__, ) class __a ( snake_case__, snake_case__ ): """simple docstring""" def __init__( self : str , lowercase_ : List[Any] ): super().__init__(lowercase_ ) super()._init_backbone(lowercase_ ) UpperCamelCase__ : str =[config.embedding_size] + config.hidden_sizes UpperCamelCase__ : Optional[int] =ResNetEmbeddings(lowercase_ ) UpperCamelCase__ : Dict =ResNetEncoder(lowercase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC ) def _lowerCAmelCase ( self : int , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ): UpperCamelCase__ : Union[str, Any] =return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ : Union[str, Any] =( 
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ : Any =self.embedder(lowercase_ ) UpperCamelCase__ : Optional[Any] =self.encoder(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) UpperCamelCase__ : str =outputs.hidden_states UpperCamelCase__ : Optional[int] =() for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: UpperCamelCase__ : int =(feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowercase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase_ , )
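The `ResNetBasicLayer` above is the classic residual block; the following is a minimal standalone sketch of the same idea in plain PyTorch (channel sizes are illustrative, and the configurable activation plumbing is omitted):

import torch
from torch import nn


class BasicResidualBlock(nn.Module):
    """3x3 conv -> BN -> ReLU -> 3x3 conv -> BN, plus a (possibly projected) skip."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # project the shortcut only when the shapes would otherwise mismatch
        self.shortcut = (
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
            if in_channels != out_channels or stride != 1
            else nn.Identity()
        )
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.activation = nn.ReLU()

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        residual = self.shortcut(hidden_state)
        return self.activation(self.layer(hidden_state) + residual)


block = BasicResidualBlock(64, 128, stride=2)
print(block(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 128, 28, 28])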
def counting_sort(collection):
    """Pure implementation of counting sort for integer collections."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
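Because the counting array is indexed by `number - coll_min`, negative inputs work, and the reversed final pass keeps equal keys in their original order, i.e. the sort is stable. For example:

assert counting_sort([5, -1, 3, -1, 0]) == [-1, -1, 0, 3, 5]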
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def 
lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
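The dummy-data machinery above keys every download on the last path component of its URL, encoded with `urllib.parse.quote_plus`. A quick sketch of that mapping (the URL and folder below are made up):

import os
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.txt?rev=2"
dummy_root = "dummy/config/1.0.0/dummy_data"

# the class maps each URL to <dummy_root>/<quote_plus(last path component)>
local_name = urllib.parse.quote_plus(Path(url).name)
print(local_name)                            # 'train.txt%3Frev%3D2'
print(os.path.join(dummy_root, local_name))  # 'dummy/config/1.0.0/dummy_data/train.txt%3Frev%3D2'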
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class __lowerCAmelCase : def __init__( self :List[str] , __magic_name__ :Any , __magic_name__ :List[Any]=13 , __magic_name__ :List[Any]=7 , __magic_name__ :Any=True , __magic_name__ :Tuple=True , __magic_name__ :Tuple=True , __magic_name__ :Dict=True , __magic_name__ :Optional[int]=99 , __magic_name__ :Tuple=[1, 1, 2] , __magic_name__ :Any=1 , __magic_name__ :Optional[Any]=32 , __magic_name__ :Dict=4 , __magic_name__ :List[str]=8 , __magic_name__ :Tuple=37 , __magic_name__ :Any="gelu_new" , __magic_name__ :int=0.1 , __magic_name__ :List[str]=0.1 , __magic_name__ :List[Any]=0.0 , __magic_name__ :Any=512 , __magic_name__ :int=3 , __magic_name__ :Optional[int]=0.02 , __magic_name__ :Optional[int]=3 , __magic_name__ :Any=4 , __magic_name__ :str=None , __magic_name__ :Tuple=False , ): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = block_sizes a = num_decoder_layers a = d_model a = n_head a = d_head a = d_inner a = hidden_act a = hidden_dropout a = attention_dropout a = activation_dropout a = max_position_embeddings a = type_vocab_size a = 2 a = num_labels a = num_choices a = scope a = initializer_std # Used in the tests to check the size of the first attention layer a = n_head # Used in the tests to check the size of the first hidden state a = self.d_model # Used in the tests to check the number of output hidden states/attentions a = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: a = self.num_hidden_layers + 2 def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Dict , __magic_name__ :Dict , __magic_name__ :str , __magic_name__ :Optional[Any] , ): '''simple docstring''' a = TFFunnelModel(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = model(__magic_name__ ) a = [input_ids, input_mask] a = model(__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) a = False a = TFFunnelModel(config=__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) a = False a = TFFunnelModel(config=__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self :Any , __magic_name__ :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :List[str] , __magic_name__ :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :str , ): '''simple docstring''' a = TFFunnelBaseModel(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = model(__magic_name__ ) a = [input_ids, input_mask] a = model(__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) a = False a = TFFunnelBaseModel(config=__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) a = False a = TFFunnelBaseModel(config=__magic_name__ ) a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self :str , __magic_name__ :List[Any] , __magic_name__ :str , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Tuple , __magic_name__ :int , ): '''simple docstring''' a = TFFunnelForPreTraining(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, 
"""token_type_ids""": token_type_ids} a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Any , __magic_name__ :Any , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :str , ): '''simple docstring''' a = TFFunnelForMaskedLM(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Any , __magic_name__ :List[str] , __magic_name__ :Any , __magic_name__ :Optional[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :int , __magic_name__ :int , ): '''simple docstring''' a = self.num_labels a = TFFunnelForSequenceClassification(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :Tuple , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Optional[int] , ): '''simple docstring''' a = self.num_choices a = TFFunnelForMultipleChoice(config=__magic_name__ ) a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) a = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) a = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self :str , __magic_name__ :Dict , __magic_name__ :List[str] , __magic_name__ :Any , __magic_name__ :Tuple , __magic_name__ :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Any , ): '''simple docstring''' a = self.num_labels a = TFFunnelForTokenClassification(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self :Dict , __magic_name__ :str , __magic_name__ :str , __magic_name__ :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Any , __magic_name__ :List[str] , __magic_name__ :List[str] , ): '''simple docstring''' a = TFFunnelForQuestionAnswering(config=__magic_name__ ) a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} a = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class 
__lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) UpperCamelCase__ = ( { '''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel), '''fill-mask''': TFFunnelForMaskedLM, '''question-answering''': TFFunnelForQuestionAnswering, '''text-classification''': TFFunnelForSequenceClassification, '''token-classification''': TFFunnelForTokenClassification, '''zero-shot''': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = TFFunnelModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @require_tf class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): UpperCamelCase__ = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = TFFunnelModelTester(self , base=__magic_name__ ) a = ConfigTester(self , config_class=__magic_name__ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
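The tester builds its inputs with the shared `ids_tensor`/`random_attention_mask` helpers; roughly equivalent plain-TensorFlow code, using the tester's default shapes, would look like this (a sketch, not the helpers' actual implementation):

import tensorflow as tf

batch_size, seq_length, vocab_size = 13, 7, 99  # tester defaults

input_ids = tf.random.uniform((batch_size, seq_length), minval=0, maxval=vocab_size, dtype=tf.int32)
attention_mask = tf.ones((batch_size, seq_length), dtype=tf.int32)
token_type_ids = tf.random.uniform((batch_size, seq_length), minval=0, maxval=2, dtype=tf.int32)

inputs = {"input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids}
print({name: tensor.shape for name, tensor in inputs.items()})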
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
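With the defaults above (`embed_dim=64` and four stages), the derived channel dimension is `64 * 2 ** 3 = 512`. A quick check, assuming a `transformers` version that ships Nat:

from transformers import NatConfig

config = NatConfig()  # defaults: embed_dim=64, depths=[3, 4, 6, 5]
print(config.num_layers)   # 4
print(config.hidden_size)  # 512 == 64 * 2 ** (len(config.depths) - 1)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']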
347
1
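Editor's illustration (not part of the dataset rows above): a minimal sketch of how the Nat config above derives its `hidden_size` attribute. The channel dimension doubles after each of the `len(depths) - 1` downsampling stages; the variable names and the assumed defaults below are taken from the row's signature.

# Channel dimension after the last Nat stage, using the row's default values.
embed_dim = 64
depths = [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # 64 * 2**3
assert hidden_size == 512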
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging __lowerCamelCase = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] __lowerCamelCase = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("""0.9.0"""): raise Exception("""requires fairseq >= 0.9.0""") logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = ' Hello world! cécé herlolip' __lowerCamelCase = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def UpperCamelCase ( __lowerCamelCase : str ): snake_case : Union[str, Any] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int ): snake_case : str = dct.pop(_lowerCamelCase ) snake_case : Dict = val def UpperCamelCase ( __lowerCamelCase : Dict ): snake_case : Tuple = torch.load(_lowerCamelCase , map_location="cpu" ) snake_case : List[Any] = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def UpperCamelCase ( __lowerCamelCase : Tuple ): snake_case : Tuple = emb.weight.shape snake_case : str = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase ) snake_case : List[Any] = emb.weight.data return lin_layer @torch.no_grad() def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=None ): if not os.path.exists(_lowerCamelCase ): snake_case : str = torch.hub.load("pytorch/fairseq" , _lowerCamelCase ).eval() else: snake_case : Dict = load_xsum_checkpoint(_lowerCamelCase ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: snake_case : Dict = checkpoint_path.replace("." 
, "-" ) snake_case : Optional[int] = BartConfig.from_pretrained(_lowerCamelCase ) snake_case : int = bart.encode(_lowerCamelCase ).unsqueeze(0 ) snake_case : Union[str, Any] = BartTokenizer.from_pretrained(_lowerCamelCase ).encode(_lowerCamelCase , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(_lowerCamelCase , _lowerCamelCase ).all(): raise ValueError( f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""" ) if checkpoint_path == "bart.large.mnli": snake_case : List[str] = bart.state_dict() remove_ignore_keys_(_lowerCamelCase ) snake_case : Union[str, Any] = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) snake_case : Optional[Any] = BartForSequenceClassification(_lowerCamelCase ).eval() model.load_state_dict(_lowerCamelCase ) snake_case : Any = bart.predict("mnli" , _lowerCamelCase , return_logits=_lowerCamelCase ) snake_case : Any = model(_lowerCamelCase )[0] # logits else: # no classification heads to worry about snake_case : Any = bart.model.state_dict() remove_ignore_keys_(_lowerCamelCase ) snake_case : Any = state_dict["decoder.embed_tokens.weight"] snake_case : List[Any] = bart.extract_features(_lowerCamelCase ) if hf_checkpoint_name == "facebook/bart-large": snake_case : Optional[Any] = BartModel(_lowerCamelCase ).eval() model.load_state_dict(_lowerCamelCase ) snake_case : Optional[int] = model(_lowerCamelCase ).model[0] else: snake_case : Dict = BartForConditionalGeneration(_lowerCamelCase ).eval() # an existing summarization ckpt model.model.load_state_dict(_lowerCamelCase ) if hasattr(_lowerCamelCase , "lm_head" ): snake_case : Any = make_linear_from_emb(model.model.shared ) snake_case : Union[str, Any] = model.model(_lowerCamelCase )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem.""" ) parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum""" ) __lowerCamelCase = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
59
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowercase : Optional[int] = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowercase : Optional[Any] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} lowercase : str = 'zero2' lowercase : Optional[int] = 'zero3' lowercase : Optional[Any] = [ZEROa, ZEROa] def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str]) -> int: '''simple docstring''' __UpperCamelCase : Union[str, Any] = parameterized.to_safe_name("_".join(str(_lowerCamelCase) for x in param.args)) return F'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test lowercase : List[str] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCamelCase__ ( __lowercase): '''simple docstring''' @parameterized.expand(a , name_func=a ) def _lowerCamelCase ( self :Dict , a :Optional[Any] , a :str ) -> Optional[int]: self.run_and_check( stage=a , model=a , distributed=a , fpaa=a , ) @require_torch_multi_gpu @parameterized.expand(a , name_func=a ) def _lowerCamelCase ( self :List[str] , a :str , a :str ) -> List[Any]: self.run_and_check( stage=a , model=a , distributed=a , fpaa=a , ) @parameterized.expand(a , name_func=a ) def _lowerCamelCase ( self :List[Any] , a :List[str] , a :int ) -> Optional[int]: self.run_and_check( stage=a , model=a , distributed=a , fpaa=a , ) @require_torch_multi_gpu @parameterized.expand(a , name_func=a ) def _lowerCamelCase ( self :List[str] , a :List[Any] , a :Dict ) -> int: self.run_and_check( stage=a , model=a , distributed=a , fpaa=a , ) def _lowerCamelCase ( self :Any , a :List[str] ) -> Optional[Any]: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def _lowerCamelCase ( self :Optional[Any] , a :str , a :str , a :int = 1_0 , a :bool = True , a :bool = True , a :bool = True , ) -> Any: __UpperCamelCase : Optional[Any] = models[model] __UpperCamelCase : List[Any] = self.run_trainer( stage=a , model_name=a , eval_steps=a , num_train_epochs=1 , distributed=a , fpaa=a , ) self.do_checks(a ) return output_dir def _lowerCamelCase ( self :List[str] , a :str , a :str , a :int = 1_0 , a :int = 1 , a :bool = True , a :bool = True , ) -> Dict: __UpperCamelCase : int = self.get_auto_remove_tmp_dir("./xxx" , after=a ) __UpperCamelCase : int = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(a )}\n 
--per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(["--fp16"] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __UpperCamelCase : Dict = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __UpperCamelCase : int = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __UpperCamelCase : Optional[Any] = self.get_launcher(a ) __UpperCamelCase : Optional[int] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(a , env=self.get_env() ) return output_dir def _lowerCamelCase ( self :Any , a :List[Any]=False ) -> List[Any]: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) __UpperCamelCase : List[Any] = min(2 , get_gpu_count() ) if distributed else 1 return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
232
0
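Editor's illustration (not part of the dataset rows above): the BART conversion script above builds a bias-free LM head whose weight matrix is the token-embedding matrix. This is a self-contained sketch of that tying pattern; the tensor sizes in the demo are hypothetical.

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    # nn.Linear stores its weight as (out_features, in_features), which matches
    # the embedding's (vocab_size, emb_size) shape.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # tie: both modules share one tensor
    return lin_layer

emb = nn.Embedding(10, 4)  # hypothetical sizes for the demo
head = make_linear_from_emb(emb)
assert head.weight.data_ptr() == emb.weight.data_ptr()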
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowercase : int = """src/transformers""" _lowercase : int = """docs/source/en""" _lowercase : Union[str, Any] = """.""" def lowerCamelCase__ ( A : Optional[Any] , A : Tuple , A : Tuple ): '''simple docstring''' with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase = f.readlines() # Find the start prompt. UpperCAmelCase = 0 while not lines[start_index].startswith(A ): start_index += 1 start_index += 1 UpperCAmelCase = start_index while not lines[end_index].startswith(A ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowercase : Tuple = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. _lowercase : Any = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") _lowercase : Tuple = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowercase : Dict = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. _lowercase : int = direct_transformers_import(TRANSFORMERS_PATH) def lowerCamelCase__ ( A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A ) return [m.group(0 ) for m in matches] def lowerCamelCase__ ( A : List[Any] , A : List[Any] ): '''simple docstring''' UpperCAmelCase = 2 if text == '''✅''' or text == '''❌''' else len(A ) UpperCAmelCase = (width - text_length) // 2 UpperCAmelCase = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES UpperCAmelCase = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } UpperCAmelCase = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. UpperCAmelCase = collections.defaultdict(A ) UpperCAmelCase = collections.defaultdict(A ) UpperCAmelCase = collections.defaultdict(A ) UpperCAmelCase = collections.defaultdict(A ) UpperCAmelCase = collections.defaultdict(A ) # Let's lookup through all transformers object (once). 
for attr_name in dir(A ): UpperCAmelCase = None if attr_name.endswith('''Tokenizer''' ): UpperCAmelCase = slow_tokenizers UpperCAmelCase = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): UpperCAmelCase = fast_tokenizers UpperCAmelCase = attr_name[:-13] elif _re_tf_models.match(A ) is not None: UpperCAmelCase = tf_models UpperCAmelCase = _re_tf_models.match(A ).groups()[0] elif _re_flax_models.match(A ) is not None: UpperCAmelCase = flax_models UpperCAmelCase = _re_flax_models.match(A ).groups()[0] elif _re_pt_models.match(A ) is not None: UpperCAmelCase = pt_models UpperCAmelCase = _re_pt_models.match(A ).groups()[0] if lookup_dict is not None: while len(A ) > 0: if attr_name in model_name_to_prefix.values(): UpperCAmelCase = True break # Try again after removing the last word in the name UpperCAmelCase = ''''''.join(camel_case_split(A )[:-1] ) # Let's build that table! UpperCAmelCase = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) UpperCAmelCase = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). UpperCAmelCase = [len(A ) + 2 for c in columns] UpperCAmelCase = max([len(A ) for name in model_names] ) + 2 # Build the table per se UpperCAmelCase = '''|''' + '''|'''.join([_center_text(A , A ) for c, w in zip(A , A )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" UpperCAmelCase = {True: '''✅''', False: '''❌'''} for name in model_names: UpperCAmelCase = model_name_to_prefix[name] UpperCAmelCase = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(A , A ) for l, w in zip(A , A )] ) + "|\n" return table def lowerCamelCase__ ( A : Any=False ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = _find_text_in_file( filename=os.path.join(A , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) UpperCAmelCase = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(A , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": _lowercase : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _lowercase : Tuple = parser.parse_args() check_model_table(args.fix_and_overwrite)
91
'''simple docstring'''


def nor_gate(input_a: int, input_b: int) -> int:
    '''simple docstring'''
    return int(input_a == input_b == 0)


def main() -> None:
    '''simple docstring'''
    print('Truth Table of NOR Gate:')
    print('| Input 1 | Input 2 | Output |')
    print(f'| 0 | 0 | {nor_gate(0, 0)} |')
    print(f'| 0 | 1 | {nor_gate(0, 1)} |')
    print(f'| 1 | 0 | {nor_gate(1, 0)} |')
    print(f'| 1 | 1 | {nor_gate(1, 1)} |')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
91
1
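Editor's sanity check (not part of the dataset rows above): NOR should be 1 only for the (0, 0) input pair, matching the truth table the row prints.

assert [int(a == b == 0) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 0, 0, 0]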
from __future__ import annotations


def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
157
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
157
1
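Editor's sanity check (not part of the dataset rows above): the compact digit-sum variant from the row above, spot-checked on one value; all three implementations should agree with it.

assert sum(int(c) for c in str(abs(-12345))) == 15  # 1 + 2 + 3 + 4 + 5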
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=3.6 ): __lowercase= tokenizer __lowercase= tokenizer.bos_token_id __lowercase= dataset __lowercase= seq_length __lowercase= seq_length * chars_per_token * num_of_sequences def __iter__(self ): __lowercase= iter(self.dataset ) __lowercase= True while more_examples: __lowercase, __lowercase= [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowerCAmelCase )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: __lowercase= False break __lowercase= tokenizer(lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(lowerCAmelCase ) , self.seq_length ): __lowercase= all_token_ids[i : i + self.seq_length] if len(lowerCAmelCase ) == self.seq_length: yield torch.tensor(lowerCAmelCase ) def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' __lowercase= {'streaming': True} __lowercase= load_dataset(args.dataset_name , split='train' , **lowercase__ ) __lowercase= ConstantLengthDataset(lowercase__ , lowercase__ , seq_length=args.seq_length ) __lowercase= DataLoader(lowercase__ , batch_size=args.batch_size ) return eval_dataloader def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' model.eval() __lowercase= [] for step, batch in enumerate(lowercase__ ): with torch.no_grad(): __lowercase= model(lowercase__ , labels=lowercase__ ) __lowercase= outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(lowercase__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __lowercase= torch.mean(torch.cat(lowercase__ ) ) try: __lowercase= torch.exp(lowercase__ ) except OverflowError: __lowercase= float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase = Accelerator() # Parse configuration lowerCAmelCase = HfArgumentParser(EvaluationArguments) lowerCAmelCase = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase = logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer lowerCAmelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase = create_dataloader(args) # Prepare everything with our `accelerator`. lowerCAmelCase ,lowerCAmelCase = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') lowerCAmelCase ,lowerCAmelCase = evaluate(args) logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
304
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCAmelCase = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class A ( A_ ): UpperCamelCase_ : Optional[int] ='''albert''' def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= embedding_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_hidden_groups __lowercase= num_attention_heads __lowercase= inner_group_num __lowercase= hidden_act __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= classifier_dropout_prob __lowercase= position_embedding_type class A ( A_ ): @property def _A (self ): if self.task == "multiple-choice": __lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowercase= {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
304
1
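Editor's illustration (not part of the dataset rows above): the evaluation loop above reports perplexity as exp(mean loss), falling back to infinity on overflow. A dependency-free sketch of that step:

import math

def perplexity_from_loss(mean_loss: float) -> float:
    try:
        return math.exp(mean_loss)
    except OverflowError:
        # math.exp raises for very large losses; report infinite perplexity instead
        return float("inf")

assert perplexity_from_loss(0.0) == 1.0
assert perplexity_from_loss(1e6) == float("inf")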
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
302
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def _UpperCamelCase ( snake_case__ ) -> List[str]: return 1.0 / (1.0 + np.exp(-_outputs )) def _UpperCamelCase ( snake_case__ ) -> Optional[int]: __UpperCAmelCase : List[str] = np.max(_outputs, axis=-1, keepdims=snake_case__ ) __UpperCAmelCase : Dict = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=snake_case__ ) class _snake_case ( _lowercase ): lowerCamelCase__: Optional[Any] = "sigmoid" lowerCamelCase__: Dict = "softmax" lowerCamelCase__: Optional[int] = "none" @add_end_docstrings( _lowercase , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class _snake_case ( _lowercase ): lowerCamelCase__: List[Any] = False lowerCamelCase__: Any = ClassificationFunction.NONE def __init__( self: Union[str, Any] , **__lowerCamelCase: List[Any] ) -> Optional[int]: super().__init__(**__lowerCamelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: str="" , **__lowerCamelCase: str ) -> Tuple: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" __UpperCAmelCase : Optional[int] = tokenizer_kwargs __UpperCAmelCase : str = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: __UpperCAmelCase : List[Any] = self.model.config.return_all_scores if isinstance(__lowerCamelCase , __lowerCamelCase ) or top_k is None: __UpperCAmelCase : Dict = top_k __UpperCAmelCase : str = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , __lowerCamelCase , ) if return_all_scores: __UpperCAmelCase : Any = None else: __UpperCAmelCase : Union[str, Any] = 1 if isinstance(__lowerCamelCase , __lowerCamelCase ): __UpperCAmelCase : Any = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: __UpperCAmelCase : Any = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self: Any , *__lowerCamelCase: Dict , **__lowerCamelCase: int ) -> Dict: __UpperCAmelCase : Any = super().__call__(*__lowerCamelCase , **__lowerCamelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
__UpperCAmelCase : Optional[Any] = "top_k" not in kwargs if isinstance(args[0] , __lowerCamelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Dict , **__lowerCamelCase: Optional[int] ) -> Dict[str, GenericTensor]: __UpperCAmelCase : Tuple = self.framework if isinstance(__lowerCamelCase , __lowerCamelCase ): return self.tokenizer(**__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) == 1 and isinstance(inputs[0] , __lowerCamelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__lowerCamelCase , **__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Optional[Any] ) -> List[Any]: return self.model(**__lowerCamelCase ) def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Tuple , __lowerCamelCase: List[str]=None , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: int=True ) -> Dict: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: __UpperCAmelCase : Union[str, Any] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: __UpperCAmelCase : str = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: __UpperCAmelCase : Any = self.model.config.function_to_apply else: __UpperCAmelCase : Optional[Any] = ClassificationFunction.NONE __UpperCAmelCase : Tuple = model_outputs["logits"][0] __UpperCAmelCase : Optional[int] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: __UpperCAmelCase : Optional[Any] = sigmoid(__lowerCamelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: __UpperCAmelCase : Any = softmax(__lowerCamelCase ) elif function_to_apply == ClassificationFunction.NONE: __UpperCAmelCase : str = outputs else: raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} __UpperCAmelCase : int = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCamelCase ) ] if not _legacy: dict_scores.sort(key=lambda __lowerCamelCase : x["score"] , reverse=__lowerCamelCase ) if top_k is not None: __UpperCAmelCase : Tuple = dict_scores[:top_k] return dict_scores
157
0
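Editor's illustration (not part of the dataset rows above): the text-classification pipeline above post-processes raw logits with a numerically stable sigmoid or softmax. A standalone NumPy sketch of both functions:

import numpy as np

def sigmoid(outputs: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-outputs))

def softmax(outputs: np.ndarray) -> np.ndarray:
    maxes = np.max(outputs, axis=-1, keepdims=True)  # subtract the row max for stability
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

probs = softmax(np.array([[1.0, 2.0, 3.0]]))
assert np.isclose(probs.sum(), 1.0)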
import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers A : Tuple = float("nan") class _UpperCamelCase : '''simple docstring''' def __init__( self , __a ): __lowerCAmelCase = sys.stdout __lowerCAmelCase = open(__a , "a" ) def __getattr__( self , __a ): return getattr(self.stdout , __a ) def snake_case ( self , __a ): self.stdout.write(__a ) # strip tqdm codes self.file.write(re.sub(R"^.*\r" , "" , __a , 0 , re.M ) ) def _lowerCamelCase ( _UpperCamelCase=80 , _UpperCamelCase=False ): '''simple docstring''' __lowerCAmelCase = [] # deal with critical env vars __lowerCAmelCase = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: __lowerCAmelCase = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) __lowerCAmelCase = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __lowerCAmelCase = [] __lowerCAmelCase = '''''' while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) __lowerCAmelCase = '''''' return "\\\n".join(UpperCamelCase__ ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = re.sub(R"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own __lowerCAmelCase = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir __lowerCAmelCase = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) __lowerCAmelCase = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams __lowerCAmelCase = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: __lowerCAmelCase = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ): '''simple docstring''' __lowerCAmelCase = [] __lowerCAmelCase 
= [] __lowerCAmelCase = f"{id}: {variation:<{longest_variation_len}}" __lowerCAmelCase = f"{preamble}: " __lowerCAmelCase = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): __lowerCAmelCase = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) __lowerCAmelCase = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" __lowerCAmelCase = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: __lowerCAmelCase = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __lowerCAmelCase = round(mean_metrics[target_metric_key] , 2 ) __lowerCAmelCase = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) __lowerCAmelCase = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = pd.DataFrame(UpperCamelCase__ ) __lowerCAmelCase = '''variation''' __lowerCAmelCase = '''diff_%''' __lowerCAmelCase = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __lowerCAmelCase = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the sentinel __lowerCAmelCase = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): __lowerCAmelCase = df.apply( lambda _UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns __lowerCAmelCase = [variation_key, target_metric_key, diff_key, *report_metric_keys] __lowerCAmelCase = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize __lowerCAmelCase = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible __lowerCAmelCase = df.rename(lambda _UpperCamelCase : c.replace("_" , "<br>" ) , axis="columns" ) __lowerCAmelCase = df.rename(lambda _UpperCamelCase : c.replace("_" , "\n" ) , axis="columns" ) __lowerCAmelCase = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", 
df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) __lowerCAmelCase = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations __lowerCAmelCase = [list(map(str.strip , re.split(R"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __lowerCAmelCase = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) __lowerCAmelCase = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys __lowerCAmelCase = args.report_metric_keys.split() # capture prints into a log file for convenience __lowerCAmelCase = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script\'s output is also piped into {report_fn}" ) __lowerCAmelCase = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) __lowerCAmelCase = '''variation''' __lowerCAmelCase = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): __lowerCAmelCase = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation 
, UpperCamelCase__ ) if __name__ == "__main__": main()
352
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer A : Any = logging.get_logger(__name__) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : int ="""AutoTokenizer""" __UpperCAmelCase : Union[str, Any] =["""tokenizer"""] __UpperCAmelCase : Tuple ={ """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , __a , __a=None ): super().__init__(__a ) __lowerCAmelCase = speaker_embeddings @classmethod def snake_case ( cls , __a , __a="speaker_embeddings_path.json" , **__a ): if speaker_embeddings_dict_path is not None: __lowerCAmelCase = get_file_from_repo( __a , __a , subfolder=kwargs.pop("subfolder" , __a ) , cache_dir=kwargs.pop("cache_dir" , __a ) , force_download=kwargs.pop("force_download" , __a ) , proxies=kwargs.pop("proxies" , __a ) , resume_download=kwargs.pop("resume_download" , __a ) , local_files_only=kwargs.pop("local_files_only" , __a ) , use_auth_token=kwargs.pop("use_auth_token" , __a ) , revision=kwargs.pop("revision" , __a ) , ) if speaker_embeddings_path is None: logger.warning( f"`{os.path.join(__a , __a )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." ) __lowerCAmelCase = None else: with open(__a ) as speaker_embeddings_json: __lowerCAmelCase = json.load(__a ) else: __lowerCAmelCase = None __lowerCAmelCase = AutoTokenizer.from_pretrained(__a , **__a ) return cls(tokenizer=__a , speaker_embeddings=__a ) def snake_case ( self , __a , __a="speaker_embeddings_path.json" , __a="speaker_embeddings" , __a = False , **__a , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(__a , __a , "v2" ) , exist_ok=__a ) __lowerCAmelCase = {} __lowerCAmelCase = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": __lowerCAmelCase = self._load_voice_preset(__a ) __lowerCAmelCase = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["repo_or_path"] , __a , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__a , ) __lowerCAmelCase = os.path.join(__a , f"{prompt_key}_{key}.npy" ) __lowerCAmelCase = tmp_dict with open(os.path.join(__a , __a ) , "w" ) as fp: json.dump(__a , __a ) super().save_pretrained(__a , __a , **__a ) def snake_case ( self , __a = None , **__a ): __lowerCAmelCase = self.speaker_embeddings[voice_preset] __lowerCAmelCase = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." 
) __lowerCAmelCase = get_file_from_repo( self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , __a ) , cache_dir=kwargs.pop("cache_dir" , __a ) , force_download=kwargs.pop("force_download" , __a ) , proxies=kwargs.pop("proxies" , __a ) , resume_download=kwargs.pop("resume_download" , __a ) , local_files_only=kwargs.pop("local_files_only" , __a ) , use_auth_token=kwargs.pop("use_auth_token" , __a ) , revision=kwargs.pop("revision" , __a ) , ) if path is None: raise ValueError( f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." ) __lowerCAmelCase = np.load(__a ) return voice_preset_dict def snake_case ( self , __a = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"Voice preset unrecognized, missing {key} as a key." ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." ) def __call__( self , __a=None , __a=None , __a="pt" , __a=2_56 , __a=False , __a=True , __a=False , **__a , ): if voice_preset is not None and not isinstance(__a , __a ): if ( isinstance(__a , __a ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): __lowerCAmelCase = self._load_voice_preset(__a ) else: if isinstance(__a , __a ) and not voice_preset.endswith(".npz" ): __lowerCAmelCase = voice_preset + ".npz" __lowerCAmelCase = np.load(__a ) if voice_preset is not None: self._validate_voice_preset_dict(__a , **__a ) __lowerCAmelCase = BatchFeature(data=__a , tensor_type=__a ) __lowerCAmelCase = self.tokenizer( __a , return_tensors=__a , padding="max_length" , max_length=__a , return_attention_mask=__a , return_token_type_ids=__a , add_special_tokens=__a , **__a , ) if voice_preset is not None: __lowerCAmelCase = voice_preset return encoded_text
259
0
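Editor's illustration (not part of the dataset rows above): the benchmark script above computes its diff_% column as the rounded percent change of a run's target metric against a baseline (sentinel) value. A one-function sketch of that formula:

def percent_diff(value: float, sentinel_value: float) -> int:
    return round(100 * (value - sentinel_value) / sentinel_value)

assert percent_diff(110.0, 100.0) == 10
assert percent_diff(90.0, 100.0) == -10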
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class __A : '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=[1, 1, 2] , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : List[Any]=37 , UpperCAmelCase_ : List[Any]="gelu_new" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=False , ) ->int: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = block_sizes snake_case_ = num_decoder_layers snake_case_ = d_model snake_case_ = n_head snake_case_ = d_head snake_case_ = d_inner snake_case_ = hidden_act snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = 2 snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope snake_case_ = initializer_std # Used in the tests to check the size of the first attention layer snake_case_ = n_head # Used in the tests to check the size of the first hidden state snake_case_ = self.d_model # Used in the tests to check the number of output hidden states/attentions snake_case_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: snake_case_ = self.num_hidden_layers + 2 def lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , ) ->str: """simple docstring""" snake_case_ = TFFunnelModel(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) snake_case_ = [input_ids, input_mask] snake_case_ = model(UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) snake_case_ = False snake_case_ = TFFunnelModel(config=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) snake_case_ = False snake_case_ = TFFunnelModel(config=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , ) ->Optional[Any]: """simple docstring""" snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) snake_case_ = [input_ids, input_mask] snake_case_ = model(UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) snake_case_ = False snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ ) snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) snake_case_ = False snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ ) snake_case_ = 
model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ) ->List[Any]: """simple docstring""" snake_case_ = TFFunnelForPreTraining(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , ) ->Tuple: """simple docstring""" snake_case_ = TFFunnelForMaskedLM(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.num_labels snake_case_ = TFFunnelForSequenceClassification(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.num_choices snake_case_ = TFFunnelForMultipleChoice(config=UpperCAmelCase_ ) snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) ) snake_case_ = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , ) ->str: """simple docstring""" snake_case_ = self.num_labels snake_case_ = TFFunnelForTokenClassification(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : 
List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , ) ->int: """simple docstring""" snake_case_ = TFFunnelForQuestionAnswering(config=UpperCAmelCase_ ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[Any] ) ->str: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Union[str, Any] = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __lowercase: Any = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __lowercase: Optional[Any] = False __lowercase: Any = False def lowerCAmelCase ( self : int ) ->str: """simple docstring""" snake_case_ = TFFunnelModelTester(self ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ ) def lowerCAmelCase ( self : int ) ->Dict: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ) ->List[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ ) def lowerCAmelCase ( self : str ) ->Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) @require_tf class __A (snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Any = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __lowercase: Dict = False __lowercase: str = False def lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" snake_case_ = TFFunnelModelTester(self , base=UpperCAmelCase_ ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : List[str] 
) ->List[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
347
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[Any] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def 
__init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Tuple = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[Any] = ["""sentencepiece"""] def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]: """simple docstring""" 
requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Union[str, Any] = ["""sentencepiece"""] def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Optional[int] = ["""sentencepiece"""] def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Dict = ["""sentencepiece"""] def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: int = ["""sentencepiece"""] def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: List[str] = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]: """simple docstring""" requires_backends(self , ["""sentencepiece"""] ) class __A (metaclass=snake_case__): '''simple docstring''' __lowercase: Any = ["""sentencepiece"""] def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str: """simple docstring""" requires_backends(self , ["""sentencepiece"""] )
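# A minimal sketch of the dummy-object pattern repeated above (an assumed
# simplification, not the transformers source): the DummyObject metaclass routes
# every public attribute access on the class through requires_backends, so both
# instantiation and classmethods like from_pretrained raise a clear ImportError
# when sentencepiece is not installed.
#
# class DummyObject(type):
#     def __getattribute__(cls, key):
#         if key.startswith("_"):
#             return super().__getattribute__(key)
#         requires_backends(cls, cls._backends)  # raises ImportError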
347
1
'''simple docstring'''
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup  # "bsa" in the source is a garbled "bs4"
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon.in search-results page for `product` into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        float(product_mrp.strip("₹").replace(",", ""))
                        - float(product_price.strip("₹").replace(",", ""))
                    )
                    / float(product_mrp.strip("₹").replace(",", ""))
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            continue  # skip entries that are missing the required fields
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
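# Worked example of the Discount column computed above: with an MRP of "₹59,990"
# and a current price of "₹47,990", the strip/replace steps yield 59990.0 and
# 47990.0, so discount = (59990.0 - 47990.0) / 59990.0 * 100 ≈ 20.0 (percent off MRP).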
187
'''simple docstring'''
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
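# Usage sketch for the criteria exercised above (model and inputs illustrative,
# not part of this test): the list plugs straight into generation, e.g.
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# output_ids = model.generate(input_ids, stopping_criteria=criteria)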
187
1
"""simple docstring""" from math import factorial def _A (__a = 20 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... SCREAMING_SNAKE_CASE_ : List[str] = n // 2 return int(factorial(__a ) / (factorial(__a ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: UpperCAmelCase_ : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number.""")
91
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = [ 
'''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : Any = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE_ : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE_ : str = '''fp16''' self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_))
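# A minimal sketch of the compatibility rule these tests encode (an assumed
# simplification, not the diffusers implementation): every PyTorch ".bin" weight
# needs a ".safetensors" counterpart in the same subfolder, with the
# transformers-style "pytorch_model" stem mapping to "model".
import os


def sketch_is_safetensors_compatible(filenames, variant=None):
    suffix = f".{variant}.bin" if variant is not None else ".bin"
    for filename in filenames:
        head, tail = os.path.split(filename)
        if not tail.endswith(suffix):
            continue  # non-.bin files (and other variants) impose no requirement
        stem = tail[: -len(".bin")]  # keeps any ".fp16" variant marker in the stem
        if stem.startswith("pytorch_model"):
            stem = "model" + stem[len("pytorch_model"):]
        if os.path.join(head, stem + ".safetensors") not in filenames:
            return False
    return True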
91
1
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _a ( _lowerCAmelCase , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _a ( unittest.TestCase ): @property def a ( self : int ) -> Any: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase : List[str] = ort.SessionOptions() _UpperCamelCase : Optional[Any] = False return options def a ( self : List[Any] ) -> Dict: '''simple docstring''' _UpperCamelCase : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) _UpperCamelCase : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) _UpperCamelCase : int = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''', revision='''onnx''', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCamelCase : List[Any] = '''A red cat sitting on a park bench''' _UpperCamelCase : Dict = np.random.RandomState(0 ) _UpperCamelCase : List[Any] = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=lowerCAmelCase__, output_type='''np''', ) _UpperCamelCase : Any = output.images _UpperCamelCase : List[str] = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _UpperCamelCase : int = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def a ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) _UpperCamelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) _UpperCamelCase : Tuple = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''', subfolder='''scheduler''', revision='''onnx''' ) _UpperCamelCase : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''', revision='''onnx''', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCamelCase : Optional[Any] = '''A red cat sitting on a park bench''' _UpperCamelCase : List[Any] = np.random.RandomState(0 ) _UpperCamelCase : Union[str, Any] = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, 
guidance_scale=7.5, num_inference_steps=2_0, generator=lowerCAmelCase__, output_type='''np''', ) _UpperCamelCase : Optional[Any] = output.images _UpperCamelCase : Tuple = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _UpperCamelCase : Dict = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
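# Note on the provider plumbing above: onnxruntime accepts (name, options) pairs,
# so self.gpu_provider pins CUDAExecutionProvider with a ~15 GB arena limit. The
# flag set to False on the ort.SessionOptions object lost its name to obfuscation;
# in the upstream diffusers test it is enable_mem_pattern.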
351
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 UpperCamelCase_ =0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 UpperCamelCase_ =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class _a : def __init__( self : str ) -> str: '''simple docstring''' _UpperCamelCase : str = WATERMARK_BITS _UpperCamelCase : Optional[int] = WatermarkEncoder() self.encoder.set_watermark('''bits''', self.watermark ) def snake_case ( self : Dict, lowerCAmelCase__ : torch.FloatTensor ) -> int: '''simple docstring''' if images.shape[-1] < 2_5_6: return images _UpperCamelCase : Union[str, Any] = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1 ).float().numpy() _UpperCamelCase : List[str] = [self.encoder.encode(lowerCAmelCase__, '''dwtDct''' ) for image in images] _UpperCamelCase : Dict = torch.from_numpy(np.array(lowerCAmelCase__ ) ).permute(0, 3, 1, 2 ) _UpperCamelCase : Optional[int] = torch.clamp(2 * (images / 2_5_5 - 0.5), min=-1.0, max=1.0 ) return images
128
0
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params lowerCamelCase__ : Any = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['memory_attention', 'encoder_attn'], ['attention', 'attn'], ['/', '.'], ['.LayerNorm.gamma', '_layer_norm.weight'], ['.LayerNorm.beta', '_layer_norm.bias'], ['r.layer_', 'r.layers.'], ['output_proj', 'out_proj'], ['ffn.dense_1.', 'fc2.'], ['ffn.dense.', 'fc1.'], ['ffn_layer_norm', 'final_layer_norm'], ['kernel', 'weight'], ['encoder_layer_norm.', 'encoder.layer_norm.'], ['decoder_layer_norm.', 'decoder.layer_norm.'], ['embeddings.weights', 'shared.weight'], ] def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Union[str, Any]: for pegasus_name, hf_name in PATTERNS: SCREAMING_SNAKE_CASE_ = k.replace(__UpperCAmelCase , __UpperCAmelCase ) return k def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Any: SCREAMING_SNAKE_CASE_ = DEFAULTS.copy() cfg_kwargs.update(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = PegasusConfig(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = PegasusForConditionalGeneration(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = torch_model.model.state_dict() SCREAMING_SNAKE_CASE_ = {} for k, v in tf_weights.items(): SCREAMING_SNAKE_CASE_ = rename_state_dict_key(__UpperCAmelCase ) if new_k not in sd: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if "dense" in k or "proj" in new_k: SCREAMING_SNAKE_CASE_ = v.T SCREAMING_SNAKE_CASE_ = torch.tensor(__UpperCAmelCase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}" # make sure embedding.padding_idx is respected SCREAMING_SNAKE_CASE_ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) SCREAMING_SNAKE_CASE_ = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE_ = mapping['''shared.weight'''] SCREAMING_SNAKE_CASE_ = {k: torch.zeros_like(__UpperCAmelCase ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = torch_model.model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def UpperCAmelCase_ ( __UpperCAmelCase : Any="./ckpt/aeslc/model.ckpt-32000" ) -> Tuple: SCREAMING_SNAKE_CASE_ = tf.train.list_variables(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(__UpperCAmelCase , desc='converting tf checkpoint to dict' ): SCREAMING_SNAKE_CASE_ = any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE_ = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = array return tf_weights def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Any: SCREAMING_SNAKE_CASE_ = Path(__UpperCAmelCase ).parent.name SCREAMING_SNAKE_CASE_ = 
task_specific_params[f"summarization_{dataset}"]['''max_position_embeddings'''] SCREAMING_SNAKE_CASE_ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=__UpperCAmelCase ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(__UpperCAmelCase ) # convert model SCREAMING_SNAKE_CASE_ = get_tf_weights_as_numpy(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = task_specific_params[f"summarization_{dataset}"] if dataset == "large": SCREAMING_SNAKE_CASE_ = task_specific_params SCREAMING_SNAKE_CASE_ = convert_pegasus(__UpperCAmelCase , __UpperCAmelCase ) torch_model.save_pretrained(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(__UpperCAmelCase , Path(__UpperCAmelCase ) / 'pytorch_model.bin' ) if __name__ == "__main__": lowerCamelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') lowerCamelCase__ : int = parser.parse_args() if args.save_dir is None: lowerCamelCase__ : str = Path(args.tf_ckpt_path).parent.name lowerCamelCase__ : Tuple = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
225
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowercase_ = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def a__ ( snake_case ): """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def a__ ( snake_case , snake_case ): """simple docstring""" if args.student_type == "roberta": __SCREAMING_SNAKE_CASE : int = False elif args.student_type == "gpt2": __SCREAMING_SNAKE_CASE : Optional[int] = False def a__ ( snake_case , snake_case ): """simple docstring""" if args.student_type == "roberta": __SCREAMING_SNAKE_CASE : Dict = False def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , 
help='''Temperature for the softmax temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() sanity_checks(snake_case ) # ARGS # init_gpu_params(snake_case ) set_seed(snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ''' itUse `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(snake_case ) , snake_case , indent=4 ) git_log(args.dump_path ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.student_type] __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __SCREAMING_SNAKE_CASE : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __SCREAMING_SNAKE_CASE : Optional[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __SCREAMING_SNAKE_CASE : Any = tokenizer.all_special_tokens.index(snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) __SCREAMING_SNAKE_CASE : Any = special_tok_ids __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: 
__SCREAMING_SNAKE_CASE : List[str] = pickle.load(snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , '''rb''' ) as fp: __SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __SCREAMING_SNAKE_CASE : Any = 0.0 # do not predict special tokens __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case ) else: __SCREAMING_SNAKE_CASE : Optional[int] = None __SCREAMING_SNAKE_CASE : Optional[Any] = LmSeqsDataset(params=snake_case , data=snake_case ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config ) __SCREAMING_SNAKE_CASE : Dict = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case ) else: __SCREAMING_SNAKE_CASE : str = student_model_class(snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # __SCREAMING_SNAKE_CASE : List[str] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(snake_case , snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(snake_case , snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __SCREAMING_SNAKE_CASE : int = Distiller( params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
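# A sketch of how the alpha_* weights parsed above are combined inside Distiller
# (illustrative; the real logic lives in distiller.py, which is not shown here):
# loss = (args.alpha_ce * loss_ce        # soft-target KL at temperature args.temperature
#         + args.alpha_mlm * loss_mlm    # masked-LM cross-entropy when --mlm is set
#         + args.alpha_clm * loss_clm    # causal-LM cross-entropy otherwise
#         + args.alpha_mse * loss_mse    # logit MSE
#         + args.alpha_cos * loss_cos)   # hidden-state cosine alignment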
303
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging A__ : List[str] =logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase ( snake_case_ ): _lowercase: Any = ['''pixel_values'''] def __init__( self : Any , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = True , **__snake_case : int , ) -> None: super().__init__(**__snake_case ) _lowerCAmelCase = size if size is not None else {"""shortest_edge""": 2_24} _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case ) _lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case , param_name="""crop_size""" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD _lowerCAmelCase = do_convert_rgb def lowercase__ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(__snake_case , default_to_square=__snake_case ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) _lowerCAmelCase = get_resize_output_image_size(__snake_case , size=size["""shortest_edge"""] , default_to_square=__snake_case ) return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : Dict , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : List[str] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : str , ) -> List[str]: return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : Tuple , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[str] , ) -> np.ndarray: return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowercase__ ( self : Optional[int] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : int = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , **__snake_case : Tuple , ) -> PIL.Image.Image: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(__snake_case , param_name="""size""" , default_to_square=__snake_case ) _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(__snake_case , param_name="""crop_size""" , default_to_square=__snake_case ) _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowerCAmelCase = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowerCAmelCase = [convert_to_rgb(__snake_case ) for image in images] # All transformations expect numpy arrays. 
_lowerCAmelCase = [to_numpy_array(__snake_case ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] _lowerCAmelCase = {"""pixel_values""": images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
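# Usage sketch for the processor class above (variable names illustrative): with
# the defaults it resizes so the shortest edge is 224, center-crops to 224x224,
# rescales by 1/255, and normalizes with the OpenAI CLIP mean/std, e.g.
# batch = processor(images=[pil_image], return_tensors="pt")
# batch["pixel_values"].shape  # -> (1, 3, 224, 224)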
'''simple docstring'''

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
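For orientation, a minimal end-to-end sketch of the processor class under test. The checkpoint name CIDAS/clipseg-rd64-refined is the public CLIPSeg checkpoint, used here purely as an illustration; the tests above build their own tiny vocab instead.

# Sketch: tokenize text and preprocess images in one call, as the tests do.
import numpy as np
from PIL import Image

from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=[image], return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']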
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the original hardcodes these values instead of using the arguments.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
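The final test above exercises RoFormer's rotary position embeddings. As a plain-NumPy illustration of the rotation it verifies (my own sketch, not the transformers implementation; it mirrors the rotate-half-and-interleave trick):

import numpy as np

def apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # x: (..., seq_len, dim) with even dim; sin/cos: (seq_len, dim) where each
    # sinusoid value is repeated twice, matching RoFormer's duplicated layout.
    # Build (-x2, x1, -x4, x3, ...) so each adjacent feature pair is rotated
    # by a position-dependent angle.
    x_rot = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
    return x * cos + x_rot * sin

# Tiny check: with zero angles (sin=0, cos=1) the input passes through unchanged.
x = np.arange(8.0).reshape(1, 2, 4)
assert np.allclose(apply_rotary(x, np.zeros((2, 4)), np.ones((2, 4))), x)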
'''simple docstring'''

from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Recursively count the ways needed_sum can be written as a sum of
    distinct natural numbers raised to `power`, trying current_number upwards."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Return the number of representations of needed_sum as a sum of distinct
    natural-number powers with the given exponent."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
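A short usage sketch for the solver above (the names solve/backtrack follow the reconstruction; the counts are easy to verify by hand):

# Usage sketch: count representations of n as a sum of distinct power-th powers.
print(solve(13, 2))   # 1 -> 13 = 2**2 + 3**2
print(solve(100, 2))  # 3 -> 10**2; 6**2 + 8**2; 1 + 3**2 + 4**2 + 5**2 + 7**2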
'''simple docstring'''

from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for models with a teacher head: they accept no labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))