Dataset columns (each row's fields appear below in this order):

    code                      string   lengths 82 to 53.2k
    code_codestyle            int64    values 0 to 721
    style_context             string   lengths 91 to 41.9k
    style_context_codestyle   int64    values 0 to 699
    label                     int64    0 or 1
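As a minimal sketch of how rows in this shape could be loaded and inspected -- the file path is a hypothetical placeholder, and reading label as "1 when the two codestyle ids match" is an assumption inferred from the rows shown here:

# Minimal sketch, assuming rows follow the schema above; the path is hypothetical.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_style_pairs/train.jsonl", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # flattened source file and its style id
print(len(row["style_context"]), row["style_context_codestyle"])  # paired context file and its style id
print(row["label"])  # assumption: 1 when the two style ids match, 0 otherwise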
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

a__ : Optional[int] = logging.get_logger(__name__)


class lowercase ( UpperCAmelCase_ ):
    """simple docstring"""

    snake_case_ = ['pixel_values']

    def __init__( self : List[Any] , a_ : bool = True , a_ : int = 32 , a_ : Optional[int]=PILImageResampling.BILINEAR , a_ : bool = True , **a_ : Union[str, Any] , ):
        """simple docstring"""
        lowerCamelCase__ = do_resize
        lowerCamelCase__ = do_rescale
        lowerCamelCase__ = size_divisor
        lowerCamelCase__ = resample
        super().__init__(**a_ )

    def _UpperCamelCase ( self : Dict , a_ : np.ndarray , a_ : int , a_ : Union[str, Any] , a_ : Optional[ChannelDimension] = None , **a_ : List[Any] ):
        """simple docstring"""
        lowerCamelCase__ , lowerCamelCase__ = get_image_size(a_ )
        # Rounds the height and width down to the closest multiple of size_divisor
        lowerCamelCase__ = height // size_divisor * size_divisor
        lowerCamelCase__ = width // size_divisor * size_divisor
        lowerCamelCase__ = resize(a_ , (new_h, new_w) , resample=a_ , data_format=a_ , **a_ )
        return image

    def _UpperCamelCase ( self : Tuple , a_ : np.ndarray , a_ : float , a_ : Optional[ChannelDimension] = None , **a_ : Any ):
        """simple docstring"""
        return rescale(image=a_ , scale=a_ , data_format=a_ , **a_ )

    def _UpperCamelCase ( self : Dict , a_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Dict=None , a_ : Optional[bool] = None , a_ : Optional[Union[TensorType, str]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : Any , ):
        """simple docstring"""
        lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase__ = size_divisor if size_divisor is not None else self.size_divisor
        lowerCamelCase__ = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        lowerCamelCase__ = make_list_of_images(a_ )
        if not valid_images(a_ ):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        lowerCamelCase__ = [to_numpy_array(a_ ) for img in images]
        if do_resize:
            lowerCamelCase__ = [self.resize(a_ , size_divisor=a_ , resample=a_ ) for image in images]
        if do_rescale:
            lowerCamelCase__ = [self.rescale(a_ , scale=1 / 2_55 ) for image in images]
        lowerCamelCase__ = [to_channel_dimension_format(a_ , a_ ) for image in images]
        lowerCamelCase__ = {"""pixel_values""": images}
        return BatchFeature(data=a_ , tensor_type=a_ )
code_codestyle: 165
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : Any = logging.get_logger(__name__) a__ : Dict = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class lowercase ( UpperCAmelCase_ ): """simple docstring""" snake_case_ = 'owlvit_text_model' def __init__( self : Optional[Any] , a_ : Optional[int]=4_94_08 , a_ : Optional[int]=5_12 , a_ : str=20_48 , a_ : Union[str, Any]=12 , a_ : Optional[Any]=8 , a_ : str=16 , a_ : Optional[int]="quick_gelu" , a_ : Optional[Any]=1e-5 , a_ : Optional[Any]=0.0 , a_ : List[str]=0.0_2 , a_ : Optional[int]=1.0 , a_ : str=0 , a_ : int=4_94_06 , a_ : int=4_94_07 , **a_ : Union[str, Any] , ): """simple docstring""" super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = intermediate_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = hidden_act lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = attention_dropout lowerCamelCase__ = initializer_range lowerCamelCase__ = initializer_factor @classmethod def _UpperCamelCase ( cls : Optional[int] , a_ : Union[str, os.PathLike] , **a_ : int ): """simple docstring""" cls._set_token_in_kwargs(a_ ) lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(a_ , **a_ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": lowerCamelCase__ = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(a_ , **a_ ) class lowercase ( UpperCAmelCase_ ): """simple docstring""" snake_case_ = 'owlvit_vision_model' def __init__( self : List[Any] , a_ : Tuple=7_68 , a_ : Dict=30_72 , a_ : List[str]=12 , a_ : Tuple=12 , a_ : Optional[Any]=3 , a_ : Dict=7_68 , a_ : Optional[int]=32 , a_ : int="quick_gelu" , a_ : Dict=1e-5 , a_ : Any=0.0 , a_ : str=0.0_2 , a_ : Optional[int]=1.0 , **a_ : str , ): """simple docstring""" super().__init__(**a_ ) lowerCamelCase__ = hidden_size lowerCamelCase__ = intermediate_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = hidden_act lowerCamelCase__ = layer_norm_eps lowerCamelCase__ = attention_dropout lowerCamelCase__ = initializer_range lowerCamelCase__ = initializer_factor @classmethod def _UpperCamelCase ( cls : int , a_ : Union[str, os.PathLike] , **a_ : Any ): """simple docstring""" cls._set_token_in_kwargs(a_ ) lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(a_ , **a_ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": lowerCamelCase__ = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(a_ , **a_ ) class lowercase ( UpperCAmelCase_ ): """simple docstring""" snake_case_ = 'owlvit' snake_case_ = True def __init__( self : Optional[int] , a_ : List[Any]=None , a_ : List[str]=None , a_ : str=5_12 , a_ : Optional[Any]=2.6_5_9_2 , a_ : Union[str, Any]=True , **a_ : Optional[Any] , ): """simple docstring""" super().__init__(**a_ ) if text_config is None: lowerCamelCase__ = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: lowerCamelCase__ = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) lowerCamelCase__ = OwlViTTextConfig(**a_ ) lowerCamelCase__ = OwlViTVisionConfig(**a_ ) lowerCamelCase__ = projection_dim lowerCamelCase__ = logit_scale_init_value lowerCamelCase__ = return_dict lowerCamelCase__ = 1.0 @classmethod def _UpperCamelCase ( cls : int , a_ : Union[str, os.PathLike] , **a_ : Optional[Any] ): """simple docstring""" cls._set_token_in_kwargs(a_ ) lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(a_ , **a_ ) if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(a_ , **a_ ) @classmethod def _UpperCamelCase ( cls : Optional[int] , a_ : Dict , a_ : Dict , **a_ : Dict ): """simple docstring""" lowerCamelCase__ = {} lowerCamelCase__ = text_config lowerCamelCase__ = vision_config return cls.from_dict(a_ , **a_ ) def _UpperCamelCase ( self : List[str] ): """simple docstring""" lowerCamelCase__ = copy.deepcopy(self.__dict__ ) lowerCamelCase__ = self.text_config.to_dict() lowerCamelCase__ = self.vision_config.to_dict() lowerCamelCase__ = self.__class__.model_type return output class lowercase ( UpperCAmelCase_ ): """simple docstring""" @property def _UpperCamelCase ( self : int ): """simple docstring""" return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _UpperCamelCase ( self : Dict ): """simple docstring""" return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _UpperCamelCase ( self : str ): """simple docstring""" return 1e-4 def _UpperCamelCase ( self : Any , a_ : "ProcessorMixin" , a_ : int = -1 , a_ : int = -1 , a_ : Optional["TensorType"] = None , ): """simple docstring""" lowerCamelCase__ = super().generate_dummy_inputs( processor.tokenizer , batch_size=a_ , seq_length=a_ , framework=a_ ) lowerCamelCase__ = super().generate_dummy_inputs( processor.image_processor , batch_size=a_ , framework=a_ ) return {**text_input_dict, **image_input_dict} @property def _UpperCamelCase ( self : Optional[Any] ): """simple docstring""" return 14
style_context_codestyle: 165
label: 1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

lowercase : List[Any] = logging.get_logger(__name__)

lowercase : Any = {}


class __lowercase ( _SCREAMING_SNAKE_CASE ):
    """simple docstring"""

    UpperCAmelCase_ : Any = '''llama'''
    UpperCAmelCase_ : str = ['''past_key_values''']

    def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Tuple:
        A : str = vocab_size
        A : Tuple = max_position_embeddings
        A : List[Any] = hidden_size
        A : Dict = intermediate_size
        A : Optional[Any] = num_hidden_layers
        A : Union[str, Any] = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            A : int = num_attention_heads
        A : Optional[Any] = num_key_value_heads
        A : str = hidden_act
        A : Tuple = initializer_range
        A : Any = rms_norm_eps
        A : Any = pretraining_tp
        A : int = use_cache
        A : List[str] = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )

    def snake_case ( self ) -> Any:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f'got {self.rope_scaling}' )
        A : Dict = self.rope_scaling.get('''type''' , __UpperCAmelCase )
        A : int = self.rope_scaling.get('''factor''' , __UpperCAmelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
code_codestyle: 706
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging lowercase : int = logging.get_logger(__name__) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): A : str = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ), F'{len(lowerCamelCase_ )} != {len(lowerCamelCase_ )}' dest_layers.load_state_dict(layers_to_copy.state_dict() ) lowercase : Optional[int] = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } lowercase : Union[str, Any] = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): try: A : Dict = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first' F' {n_student}' ) return list(range(lowerCamelCase_ ) ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): if n_student > n_teacher: raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' ) elif n_teacher == n_student: return list(range(lowerCamelCase_ ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ = "student" , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ): A : List[str] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.''' assert (e is not None) or (d is not None), _msg if isinstance(lowerCamelCase_ , lowerCamelCase_ ): AutoTokenizer.from_pretrained(lowerCamelCase_ ).save_pretrained(lowerCamelCase_ ) # purely for convenience A : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ ).eval() else: assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), F'teacher must be a model or string got type {type(lowerCamelCase_ )}' A : Tuple = teacher.config.to_diff_dict() try: A , A : str = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: A : str = teacher_e if d is None: A : List[str] = teacher_d init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} ) except AttributeError: # T5 if hasattr(teacher.config , '''num_encoder_layers''' ): A , A : str = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: A , A : List[str] = teacher.config.num_layers, 
teacher.config.num_decoder_layers if e is None: A : Union[str, Any] = teacher_e if d is None: A : Any = teacher_d if hasattr(teacher.config , '''num_encoder_layers''' ): init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} ) else: init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(lowerCamelCase_ ) # Copy weights A : Dict = teacher.config_class(**lowerCamelCase_ ) A : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCamelCase_ ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. A : int = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase_ ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save A , A : Tuple = list(range(lowerCamelCase_ ) ), list(range(lowerCamelCase_ ) ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to' F' {save_path}' ) student.save_pretrained(lowerCamelCase_ ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: A : List[int] = pick_layers_to_copy(lowerCamelCase_ , lowerCamelCase_ ) if d_layers_to_copy is None: A : List[int] = pick_layers_to_copy(lowerCamelCase_ , lowerCamelCase_ ) try: if hasattr( lowerCamelCase_ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase_ ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase_ ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase_ ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase_ ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase_ ) copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase_ ) logger.info( F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' ) A : Tuple = { '''teacher_type''': teacher.config.model_type, '''copied_encoder_layers''': e_layers_to_copy, '''copied_decoder_layers''': d_layers_to_copy, } student.save_pretrained(lowerCamelCase_ ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
style_context_codestyle: 423
label: 0
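The LLaMA-style configuration in this row's code field validates its rope_scaling argument: the value must be a two-entry dict whose type key is "linear" or "dynamic" and whose factor is a float strictly greater than 1 (note the error message in the sample says `name` where the code actually reads the `type` key). A minimal sketch of values judged against that validation logic:

# Values checked against the _rope_scaling_validation logic shown above.
rope_scaling = {"type": "linear", "factor": 2.0}   # passes: two keys, valid type, float factor > 1.0
# rope_scaling = {"type": "dynamic", "factor": 1.0}  # rejected: factor must be strictly greater than 1.0
# rope_scaling = {"name": "linear", "factor": 2.0}   # rejected: the code reads "type", not "name"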
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ,_A ,_A=0.0 ,_A = None ,_A = "geglu" ,_A = None ,_A = False ,_A = False ,_A = False ,_A = False ,_A = True ,_A = "layer_norm" ,_A = False ,): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = only_cross_attention _lowerCAmelCase : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero' _lowerCAmelCase : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to""" F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: _lowerCAmelCase : Dict = AdaLayerNorm(_A ,_A ) elif self.use_ada_layer_norm_zero: _lowerCAmelCase : Optional[int] = AdaLayerNormZero(_A ,_A ) else: _lowerCAmelCase : Tuple = nn.LayerNorm(_A ,elementwise_affine=_A ) _lowerCAmelCase : str = Attention( query_dim=_A ,heads=_A ,dim_head=_A ,dropout=_A ,bias=_A ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_A ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. _lowerCAmelCase : Tuple = ( AdaLayerNorm(_A ,_A ) if self.use_ada_layer_norm else nn.LayerNorm(_A ,elementwise_affine=_A ) ) _lowerCAmelCase : Optional[int] = Attention( query_dim=_A ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_A ,dim_head=_A ,dropout=_A ,bias=_A ,upcast_attention=_A ,) # is self-attn if encoder_hidden_states is none else: _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : List[Any] = None # 3. 
Feed-forward _lowerCAmelCase : List[str] = nn.LayerNorm(_A ,elementwise_affine=_A ) _lowerCAmelCase : Tuple = FeedForward(_A ,dropout=_A ,activation_fn=_A ,final_dropout=_A ) # let chunk size default to None _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : List[Any] = 0 def __lowerCamelCase ( self ,_A ,_A ): '''simple docstring''' _lowerCAmelCase : str = chunk_size _lowerCAmelCase : int = dim def __lowerCamelCase ( self ,_A ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,): '''simple docstring''' if self.use_ada_layer_norm: _lowerCAmelCase : Tuple = self.norma(_A ,_A ) elif self.use_ada_layer_norm_zero: _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = self.norma( _A ,_A ,_A ,hidden_dtype=hidden_states.dtype ) else: _lowerCAmelCase : List[Any] = self.norma(_A ) _lowerCAmelCase : Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {} _lowerCAmelCase : Optional[int] = self.attna( _A ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_A ,**_A ,) if self.use_ada_layer_norm_zero: _lowerCAmelCase : str = gate_msa.unsqueeze(1 ) * attn_output _lowerCAmelCase : Optional[Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: _lowerCAmelCase : List[Any] = ( self.norma(_A ,_A ) if self.use_ada_layer_norm else self.norma(_A ) ) _lowerCAmelCase : Union[str, Any] = self.attna( _A ,encoder_hidden_states=_A ,attention_mask=_A ,**_A ,) _lowerCAmelCase : List[Any] = attn_output + hidden_states # 3. Feed-forward _lowerCAmelCase : Dict = self.norma(_A ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" ) _lowerCAmelCase : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size _lowerCAmelCase : Tuple = torch.cat( [self.ff(_A ) for hid_slice in norm_hidden_states.chunk(_A ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: _lowerCAmelCase : Optional[int] = self.ff(_A ) if self.use_ada_layer_norm_zero: _lowerCAmelCase : Optional[int] = gate_mlp.unsqueeze(1 ) * ff_output _lowerCAmelCase : int = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A = None ,_A = 4 ,_A = 0.0 ,_A = "geglu" ,_A = False ,): '''simple docstring''' super().__init__() _lowerCAmelCase : Dict = int(dim * mult ) _lowerCAmelCase : str = dim_out if dim_out is not None else dim if activation_fn == "gelu": _lowerCAmelCase : Optional[Any] = GELU(_A ,_A ) if activation_fn == "gelu-approximate": _lowerCAmelCase : str = GELU(_A ,_A ,approximate='tanh' ) elif activation_fn == "geglu": _lowerCAmelCase : Dict = GEGLU(_A ,_A ) elif activation_fn == "geglu-approximate": _lowerCAmelCase : Dict = ApproximateGELU(_A ,_A ) _lowerCAmelCase : Union[str, Any] = nn.ModuleList([] ) # project in self.net.append(_A ) # project dropout self.net.append(nn.Dropout(_A ) ) # project out self.net.append(nn.Linear(_A ,_A ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(_A ) ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' for module in self.net: _lowerCAmelCase : Dict = module(_A ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ,_A = "none" ): '''simple docstring''' super().__init__() _lowerCAmelCase : str = nn.Linear(_A ,_A ) _lowerCAmelCase : Any = approximate def __lowerCamelCase ( self ,_A ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(_A ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.proj(_A ) _lowerCAmelCase : Union[str, Any] = self.gelu(_A ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ): '''simple docstring''' super().__init__() _lowerCAmelCase : str = nn.Linear(_A ,dim_out * 2 ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(_A ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase, _lowerCAmelCase : List[str] = self.proj(_A ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_A ) class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ): '''simple docstring''' super().__init__() _lowerCAmelCase : Dict = nn.Linear(_A ,_A ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : Dict = self.proj(_A ) return x * torch.sigmoid(1.7_0_2 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ): '''simple docstring''' super().__init__() _lowerCAmelCase : Dict = nn.Embedding(_A ,_A ) _lowerCAmelCase : Dict = nn.SiLU() _lowerCAmelCase : int = nn.Linear(_A ,embedding_dim * 2 ) _lowerCAmelCase : Optional[int] = nn.LayerNorm(_A ,elementwise_affine=_A ) def __lowerCamelCase ( self ,_A ,_A ): '''simple docstring''' _lowerCAmelCase : List[str] = self.linear(self.silu(self.emb(_A ) ) ) _lowerCAmelCase, _lowerCAmelCase : Dict = torch.chunk(_A ,2 ) _lowerCAmelCase : Dict = self.norm(_A ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ): '''simple docstring''' super().__init__() _lowerCAmelCase : Any = CombinedTimestepLabelEmbeddings(_A ,_A ) _lowerCAmelCase : int = nn.SiLU() _lowerCAmelCase : Any = nn.Linear(_A ,6 * embedding_dim ,bias=_A ) _lowerCAmelCase : Optional[Any] = nn.LayerNorm(_A ,elementwise_affine=_A ,eps=1E-6 ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ): '''simple docstring''' _lowerCAmelCase : int = self.linear(self.silu(self.emb(_A ,_A ,hidden_dtype=_A ) ) ) _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = emb.chunk(6 ,dim=1 ) _lowerCAmelCase : str = self.norm(_A ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ,_A ,_A ,_A = None ,_A = 1E-5 ): '''simple docstring''' super().__init__() _lowerCAmelCase : Tuple = num_groups _lowerCAmelCase : List[Any] = eps if act_fn is None: _lowerCAmelCase : Union[str, Any] = None else: _lowerCAmelCase : Union[str, Any] = get_activation(_A ) _lowerCAmelCase : Dict = nn.Linear(_A ,out_dim * 2 ) def __lowerCamelCase ( self ,_A ,_A ): '''simple docstring''' if 
self.act: _lowerCAmelCase : List[Any] = self.act(_A ) _lowerCAmelCase : int = self.linear(_A ) _lowerCAmelCase : Any = emb[:, :, None, None] _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = emb.chunk(2 ,dim=1 ) _lowerCAmelCase : Union[str, Any] = F.group_norm(_A ,self.num_groups ,eps=self.eps ) _lowerCAmelCase : Optional[int] = x * (1 + scale) + shift return x
code_codestyle: 259
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors _lowerCamelCase : Dict = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' _UpperCAmelCase : int = "sequence-classification" def __init__( self : Optional[int] , lowercase : Any ): '''simple docstring''' if type(lowercase ) == dict: _snake_case = Namespace(**lowercase ) _snake_case = glue_output_modes[hparams.task] _snake_case = glue_tasks_num_labels[hparams.task] super().__init__(lowercase , lowercase , self.mode ) def A ( self : Optional[Any] , **lowercase : Optional[Any] ): '''simple docstring''' return self.model(**lowercase ) def A ( self : Optional[Any] , lowercase : str , lowercase : Tuple ): '''simple docstring''' _snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None _snake_case = self(**lowercase ) _snake_case = outputs[0] _snake_case = self.trainer.lr_schedulers[0]['scheduler'] _snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def A ( self : Optional[Any] ): '''simple docstring''' _snake_case = self.hparams _snake_case = processors[args.task]() _snake_case = processor.get_labels() for mode in ["train", "dev"]: _snake_case = self._feature_file(lowercase ) if os.path.exists(lowercase ) and not args.overwrite_cache: logger.info('Loading features from cached file %s' , lowercase ) else: logger.info('Creating features from dataset file at %s' , args.data_dir ) _snake_case = ( processor.get_dev_examples(args.data_dir ) if mode == 'dev' else processor.get_train_examples(args.data_dir ) ) _snake_case = convert_examples_to_features( lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('Saving features into cached file %s' , lowercase ) torch.save(lowercase , lowercase ) def A ( self : Dict , lowercase : str , lowercase : int , lowercase : bool = False ): '''simple docstring''' _snake_case = 'dev' if mode == 'test' else mode _snake_case = self._feature_file(lowercase ) logger.info('Loading features from cached file %s' , lowercase ) _snake_case = torch.load(lowercase ) _snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) _snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": _snake_case = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": _snake_case = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , ) def A ( self : str , lowercase : Optional[Any] , lowercase : str ): '''simple docstring''' 
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: _snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None _snake_case = self(**lowercase ) _snake_case , _snake_case = outputs[:2] _snake_case = logits.detach().cpu().numpy() _snake_case = inputs['labels'].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def A ( self : int , lowercase : Optional[int] ): '''simple docstring''' _snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item() _snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": _snake_case = np.argmax(lowercase , axis=1 ) elif self.hparams.glue_output_mode == "regression": _snake_case = np.squeeze(lowercase ) _snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 ) _snake_case = [[] for _ in range(out_label_ids.shape[0] )] _snake_case = [[] for _ in range(out_label_ids.shape[0] )] _snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )} _snake_case = dict(results.items() ) _snake_case = results return ret, preds_list, out_label_list def A ( self : int , lowercase : list ): '''simple docstring''' _snake_case , _snake_case , _snake_case = self._eval_end(lowercase ) _snake_case = ret['log'] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def A ( self : List[str] , lowercase : Any ): '''simple docstring''' _snake_case , _snake_case , _snake_case = self._eval_end(lowercase ) _snake_case = ret['log'] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def A ( lowercase : Tuple , lowercase : Any ): '''simple docstring''' BaseTransformer.add_model_specific_args(lowercase , lowercase ) parser.add_argument( '--max_seq_length' , default=128 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--task' , default='' , type=lowercase , required=lowercase , help='The GLUE task to run' , ) parser.add_argument( '--gpus' , default=0 , type=lowercase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) return parser def a_ ( ) -> Union[str, Any]: _snake_case = argparse.ArgumentParser() add_generic_args(__lowercase , os.getcwd() ) _snake_case = GLUETransformer.add_model_specific_args(__lowercase , os.getcwd() ) _snake_case = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: _snake_case = os.path.join( './results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , ) os.makedirs(args.output_dir ) _snake_case = GLUETransformer(__lowercase ) _snake_case = generic_train(__lowercase , __lowercase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: _snake_case = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__lowercase ) ) _snake_case = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(__lowercase ) if __name__ == "__main__": main()
style_context_codestyle: 686
label: 0
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class snake_case_ ( unittest.TestCase ):
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Any = inspect.getfile(accelerate.test_utils )
        SCREAMING_SNAKE_CASE_ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        SCREAMING_SNAKE_CASE_ : int = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )

    @require_tpu
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Tuple = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
        SCREAMING_SNAKE_CASE_ : List[str] = [sys.executable] + distributed_args
        execute_subprocess_async(lowercase_ , env=os.environ.copy() )
code_codestyle: 716
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

lowerCAmelCase__: List[str] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__: Optional[int] = ["DPTFeatureExtractor"]
    lowerCAmelCase__: Any = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__: Tuple = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    lowerCAmelCase__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 311
label: 0
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A : Optional[Any] = logging.get_logger(__name__) def UpperCamelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : str , snake_case_ : str ) -> Tuple: '''simple docstring''' __lowerCAmelCase = original_name.split(""".""" )[0] __lowerCAmelCase = key.split(""".""" ) __lowerCAmelCase = int(key_list[key_list.index(_UpperCamelCase ) - 2] ) __lowerCAmelCase = int(key_list[key_list.index(_UpperCamelCase ) - 1] ) __lowerCAmelCase = orig_block_num - offset __lowerCAmelCase = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def UpperCamelCase_ ( snake_case_ : Optional[int] ) -> int: '''simple docstring''' __lowerCAmelCase = OrderedDict() __lowerCAmelCase , __lowerCAmelCase = 0, 0 for key, value in state_dict.items(): if key.startswith("""network""" ): __lowerCAmelCase = key.replace("""network""" , """poolformer.encoder""" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("""bias""" ) and "patch_embed" not in key: patch_emb_offset += 1 __lowerCAmelCase = key[: key.find("""proj""" )] __lowerCAmelCase = key.replace(_UpperCamelCase , f"""patch_embeddings.{total_embed_found}.""" ) __lowerCAmelCase = key.replace("""proj""" , """projection""" ) if key.endswith("""bias""" ): total_embed_found += 1 if "patch_embeddings" in key: __lowerCAmelCase = """poolformer.encoder.""" + key if "mlp.fc1" in key: __lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , """mlp.fc1""" , """output.conv1""" ) if "mlp.fc2" in key: __lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , """mlp.fc2""" , """output.conv2""" ) if "norm1" in key: __lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , """norm1""" , """before_norm""" ) if "norm2" in key: __lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , """norm2""" , """after_norm""" ) if "layer_scale_1" in key: __lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , """layer_scale_1""" , """layer_scale_1""" ) if "layer_scale_2" in key: __lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , """layer_scale_2""" , """layer_scale_2""" ) if "head" in key: __lowerCAmelCase = key.replace("""head""" , """classifier""" ) __lowerCAmelCase = value return new_state_dict def UpperCamelCase_ ( ) -> List[Any]: '''simple docstring''' __lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowerCAmelCase = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return image @torch.no_grad() def UpperCamelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : int ) -> int: '''simple docstring''' __lowerCAmelCase = PoolFormerConfig() # set attributes based on model_name __lowerCAmelCase = """huggingface/label-files""" __lowerCAmelCase = model_name[-3:] __lowerCAmelCase = 10_00 __lowerCAmelCase = """imagenet-1k-id2label.json""" __lowerCAmelCase = (1, 10_00) # set config attributes __lowerCAmelCase = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , 
repo_type="""dataset""" ) , """r""" ) ) __lowerCAmelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} if size == "s12": __lowerCAmelCase = [2, 2, 6, 2] __lowerCAmelCase = [64, 1_28, 3_20, 5_12] __lowerCAmelCase = 4.0 __lowerCAmelCase = 0.9 elif size == "s24": __lowerCAmelCase = [4, 4, 12, 4] __lowerCAmelCase = [64, 1_28, 3_20, 5_12] __lowerCAmelCase = 4.0 __lowerCAmelCase = 0.9 elif size == "s36": __lowerCAmelCase = [6, 6, 18, 6] __lowerCAmelCase = [64, 1_28, 3_20, 5_12] __lowerCAmelCase = 4.0 __lowerCAmelCase = 1E-6 __lowerCAmelCase = 0.9 elif size == "m36": __lowerCAmelCase = [6, 6, 18, 6] __lowerCAmelCase = [96, 1_92, 3_84, 7_68] __lowerCAmelCase = 4.0 __lowerCAmelCase = 1E-6 __lowerCAmelCase = 0.9_5 elif size == "m48": __lowerCAmelCase = [8, 8, 24, 8] __lowerCAmelCase = [96, 1_92, 3_84, 7_68] __lowerCAmelCase = 4.0 __lowerCAmelCase = 1E-6 __lowerCAmelCase = 0.9_5 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor __lowerCAmelCase = PoolFormerImageProcessor(crop_pct=_UpperCamelCase ) # Prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="""pt""" ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict __lowerCAmelCase = torch.load(_UpperCamelCase , map_location=torch.device("""cpu""" ) ) # rename keys __lowerCAmelCase = rename_keys(_UpperCamelCase ) # create HuggingFace model and load state dict __lowerCAmelCase = PoolFormerForImageClassification(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) model.eval() # Define image processor __lowerCAmelCase = PoolFormerImageProcessor(crop_pct=_UpperCamelCase ) __lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values # forward pass __lowerCAmelCase = model(_UpperCamelCase ) __lowerCAmelCase = outputs.logits # define expected logit slices for different models if size == "s12": __lowerCAmelCase = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] ) elif size == "s24": __lowerCAmelCase = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] ) elif size == "s36": __lowerCAmelCase = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] ) elif size == "m36": __lowerCAmelCase = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] ) elif size == "m48": __lowerCAmelCase = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1E-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": _A : Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _A : str = parser.parse_args() convert_poolformer_checkpoint(args.model_name, 
args.checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 427
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class UpperCamelCase ( SCREAMING_SNAKE_CASE ): def UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = 8 # DPR tok SCREAMING_SNAKE_CASE = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok SCREAMING_SNAKE_CASE = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) def UpperCamelCase ( self : Optional[Any] ): """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def UpperCamelCase ( self : Optional[int] ): """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def UpperCamelCase ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'rag_tokenizer' ) SCREAMING_SNAKE_CASE = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) SCREAMING_SNAKE_CASE = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(snake_case__ ) rag_tokenizer.save_pretrained(snake_case__ ) SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained(snake_case__ , config=snake_case__ ) 
self.assertIsInstance(new_rag_tokenizer.question_encoder , snake_case__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , snake_case__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) SCREAMING_SNAKE_CASE = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] SCREAMING_SNAKE_CASE = tokenizer(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow def UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) SCREAMING_SNAKE_CASE = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] SCREAMING_SNAKE_CASE = tokenizer(snake_case__ ) self.assertIsNotNone(snake_case__ )
style_context_codestyle: 439
label: 0
import argparse
import os
import re

import packaging.version

A__ = "examples/"
A__ = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A__ = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
A__ = "README.md"


def _lowercase ( a_ : Union[str, Any] ,a_ : int ,a_ : Any ) -> List[Any]:
    '''simple docstring'''
    with open(__UpperCamelCase ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
        __magic_name__ = f.read()
    __magic_name__, __magic_name__ = REPLACE_PATTERNS[pattern]
    __magic_name__ = replace.replace('VERSION' ,__UpperCamelCase )
    __magic_name__ = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase )
    with open(__UpperCamelCase ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
        f.write(__UpperCamelCase )


def _lowercase ( a_ : Optional[Any] ) -> Any:
    '''simple docstring'''
    for folder, directories, fnames in os.walk(__UpperCamelCase ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern='examples' )


def _lowercase ( a_ : Any ,a_ : str=False ) -> int:
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    if not patch:
        update_version_in_examples(__UpperCamelCase )


def _lowercase ( ) -> Dict:
    '''simple docstring'''
    __magic_name__ = '🤗 Transformers currently provides the following architectures'
    __magic_name__ = '1. Want to contribute a new model?'
    with open(__UpperCamelCase ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
        __magic_name__ = f.readlines()
    # Find the start of the list.
    __magic_name__ = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    __magic_name__ = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            __magic_name__ = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' ,'https://huggingface.co/docs/transformers/model_doc' ,)
        index += 1
    with open(__UpperCamelCase ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
        f.writelines(__UpperCamelCase )


def _lowercase ( ) -> Optional[Any]:
    '''simple docstring'''
    with open(REPLACE_FILES['init'] ,'r' ) as f:
        __magic_name__ = f.read()
    __magic_name__ = REPLACE_PATTERNS['init'][0].search(__UpperCamelCase ).groups()[0]
    return packaging.version.parse(__UpperCamelCase )


def _lowercase ( a_ : Optional[Any]=False ) -> Tuple:
    '''simple docstring'''
    __magic_name__ = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        __magic_name__ = default_version.base_version
    elif patch:
        __magic_name__ = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        __magic_name__ = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    __magic_name__ = input(F'Which version are you releasing? [{default_version}]' )
    if len(__UpperCamelCase ) == 0:
        __magic_name__ = default_version
    print(F'Updating version to {version}.' )
    global_version_update(__UpperCamelCase ,patch=__UpperCamelCase )
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
        clean_main_ref_in_model_list()


def _lowercase ( ) -> Union[str, Any]:
    '''simple docstring'''
    __magic_name__ = get_version()
    __magic_name__ = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    __magic_name__ = current_version.base_version
    # Check with the user we got that right.
    __magic_name__ = input(F'Which version are we developing now? [{dev_version}]' )
    if len(__UpperCamelCase ) == 0:
        __magic_name__ = dev_version
    print(F'Updating version to {version}.' )
    global_version_update(__UpperCamelCase )
    print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    A__ = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    A__ = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
code_codestyle: 708
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

A__ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    A__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
184
0
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _lowerCAmelCase ( UpperCamelCase__: int , UpperCamelCase__: List[Any] , UpperCamelCase__: Any=None , UpperCamelCase__: List[Any]=None ) -> Dict: """simple docstring""" if attention_mask is None: A = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class _UpperCamelCase : """simple docstring""" lowerCAmelCase = OPTConfig lowerCAmelCase = {} lowerCAmelCase = 'gelu' def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=16 , a__=2 , a__=4 , a__=4 , a__="gelu" , a__=0.1 , a__=0.1 , a__=20 , a__=2 , a__=1 , a__=0 , a__=16 , a__=16 , ) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = eos_token_id A = pad_token_id A = bos_token_id A = embed_dim A = word_embed_proj_dim A = False def _UpperCAmelCase ( self ) -> Any: A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A = tf.concat([input_ids, eos_tensor] , axis=1 ) A = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=A_ , **self.config_updates , ) A = prepare_opt_inputs_dict(A_ , A_ ) return config, inputs_dict def _UpperCAmelCase ( self , a__ , a__ ) -> Any: A = TFOPTModel(config=A_ ) A = inputs_dict['''input_ids'''] A = input_ids[:1, :] A = inputs_dict['''attention_mask'''][:1, :] A = 1 # first forward pass A = model(A_ , attention_mask=A_ , use_cache=A_ ) A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) , config.vocab_size ) A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A = tf.concat([input_ids, next_tokens] , axis=-1 ) A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A = model(A_ , attention_mask=A_ )[0] A = model(A_ , attention_mask=A_ , past_key_values=A_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A = output_from_no_past[:, -3:, random_slice_idx] A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A_ , A_ , rtol=1e-3 ) @require_tf class _UpperCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple 
docstring""" lowerCAmelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCAmelCase = (TFOPTForCausalLM,) if is_tf_available() else () lowerCAmelCase = ( {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = 1_0 def _UpperCAmelCase ( self ) -> Optional[Any]: A = TFOPTModelTester(self ) A = ConfigTester(self , config_class=A_ ) def _UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> int: A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A_ ) def _UpperCAmelCase ( self ) -> Union[str, Any]: A = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(a__ , a__ ): if hasattr(A_ , """weight""" ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(A_ , """weight""" ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings A = model_class(config=A_ ) A = _get_word_embedding_weight(A_ , model.get_input_embeddings() ) A = _get_word_embedding_weight(A_ , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(A_ ) A = _get_word_embedding_weight(A_ , model.get_input_embeddings() ) A = _get_word_embedding_weight(A_ , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. A = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , A_ ) # check that weights remain the same after resizing A = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: A = False self.assertTrue(A_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , A_ ) A = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: A = False self.assertTrue(A_ ) def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return tf.constant(__UpperCamelCase , dtype=tf.intaa ) @require_tf class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" lowerCAmelCase = 9_9 def _UpperCAmelCase ( self ) -> List[Any]: A = tf.ones((4, 1) , dtype=tf.intaa ) * 2 A = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) A = input_ids.shape[0] A = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def _UpperCAmelCase ( self ) -> Optional[int]: A = TFOPTModel.from_pretrained("""facebook/opt-350m""" ) A = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) A = tf.not_equal(A_ , model.config.pad_token_id ) with tf.GradientTape(): A = model(input_ids=A_ , attention_mask=A_ ).last_hidden_state A = (1, 11, 512) self.assertEqual(output.shape , A_ ) A = tf.constant( [[-0.28_73, 
-1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-3 ) ) A = tf.function(A_ , jit_compile=A_ ) A = xla_generate(A_ , A_ )[0] self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-2 ) ) @require_tf @slow class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> int: super().setUp() A = '''facebook/opt-350m''' def _UpperCAmelCase ( self ) -> Any: A = TFOPTForCausalLM.from_pretrained(self.path_model ) A = GPTaTokenizer.from_pretrained(self.path_model ) A = [ '''Today is a beautiful day and I want to''', '''In the city of''', '''Paris is the capital of France and''', '''Computers and mobile phones have taken''', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False A = tokenizer(A_ , return_tensors="""tf""" , padding=A_ , add_special_tokens=A_ ) A = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) A = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(A_ , A_ , atol=1e-4 ) ) A = tf.function(A_ , jit_compile=A_ ) A = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(A_ , A_ , atol=1e-4 ) ) @require_tf @slow class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" @property def _UpperCAmelCase ( self ) -> Dict: return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _UpperCAmelCase ( self ) -> int: A = '''facebook/opt-125m''' A = [ '''Today is a beautiful day and I want to''', '''In the city of New York, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] A = [] A = GPTaTokenizer.from_pretrained(A_ ) A = TFOPTForCausalLM.from_pretrained(A_ ) for prompt in self.prompts: A = tokenizer(A_ , return_tensors="""tf""" ).input_ids A = model.generate(A_ , max_length=10 ) A = tokenizer.batch_decode(A_ , skip_special_tokens=A_ ) predicted_outputs += generated_string self.assertListEqual(A_ , A_ ) def _UpperCAmelCase ( self ) -> Dict: A = '''facebook/opt-350m''' A = GPTaTokenizer.from_pretrained(A_ ) A = TFOPTForCausalLM.from_pretrained(A_ ) A = '''left''' # use different length sentences to test batching A = [ '''Hello, my dog is a little''', '''Today, I''', ] A = tokenizer(A_ , return_tensors="""tf""" , padding=A_ ) A = inputs['''input_ids'''] A = model.generate(input_ids=A_ , attention_mask=inputs["""attention_mask"""] ) A = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids A = model.generate(input_ids=A_ ) A = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) ) A = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids A = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings ) A = tokenizer.batch_decode(A_ , skip_special_tokens=A_ ) A = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ ) A = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ 
) A = [ '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''', '''Today, I was in the middle of a conversation with a friend about the''', ] self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] ) def _UpperCAmelCase ( self ) -> Optional[Any]: A = '''facebook/opt-350m''' A = [ '''Today is a beautiful day and I want to''', '''In the city of San Francisco, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] A = [] A = GPTaTokenizer.from_pretrained(A_ ) A = TFOPTForCausalLM.from_pretrained(A_ ) for prompt in self.prompts: A = tokenizer(A_ , return_tensors="""tf""" ).input_ids A = model.generate(A_ , max_length=10 ) A = tokenizer.batch_decode(A_ , skip_special_tokens=A_ ) predicted_outputs += generated_string self.assertListEqual(A_ , A_ )
641
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __snake_case ( UpperCamelCase_ ): _a = ['''image_processor''', '''tokenizer'''] _a = '''ViltImageProcessor''' _a = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : Optional[Any] , A_ : str=None , A_ : Any=None , **A_ : Dict): lowerCAmelCase_ : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A_ , ) lowerCAmelCase_ : Optional[int] = kwargs.pop('''feature_extractor''') lowerCAmelCase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(A_ , A_) lowerCAmelCase_ : Any = self.image_processor def __call__( self : str , A_ : str , A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , A_ : bool = True , A_ : Union[bool, str, PaddingStrategy] = False , A_ : Union[bool, str, TruncationStrategy] = None , A_ : Optional[int] = None , A_ : int = 0 , A_ : Optional[int] = None , A_ : Optional[bool] = None , A_ : Optional[bool] = None , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = True , A_ : Optional[Union[str, TensorType]] = None , **A_ : Optional[Any] , ): lowerCAmelCase_ : List[str] = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) # add pixel_values + pixel_mask lowerCAmelCase_ : List[Any] = self.image_processor(A_ , return_tensors=A_) encoding.update(A_) return encoding def UpperCAmelCase__ ( self : Tuple , *A_ : Union[str, Any] , **A_ : Any): return self.tokenizer.batch_decode(*A_ , **A_) def UpperCAmelCase__ ( self : Optional[Any] , *A_ : List[str] , **A_ : Union[str, Any]): return self.tokenizer.decode(*A_ , **A_) @property def UpperCAmelCase__ ( self : int): lowerCAmelCase_ : List[str] = self.tokenizer.model_input_names lowerCAmelCase_ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def UpperCAmelCase__ ( self : Union[str, Any]): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A_ , ) return self.image_processor_class @property def UpperCAmelCase__ ( self : Dict): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A_ , ) return self.image_processor
171
0
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run UpperCamelCase_ = True except (ImportError, AttributeError): UpperCamelCase_ = object def SCREAMING_SNAKE_CASE ( *snake_case__ , **snake_case__ ) -> str: pass UpperCamelCase_ = False UpperCamelCase_ = logging.get_logger('transformers-cli/serving') def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[Any]: __UpperCAmelCase =pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(A_ , args.host , args.port , args.workers ) class _SCREAMING_SNAKE_CASE ( _A ): a_ : Union[str, Any] = 42 class _SCREAMING_SNAKE_CASE ( _A ): a_ : Tuple = 42 a_ : str = 42 class _SCREAMING_SNAKE_CASE ( _A ): a_ : Optional[Any] = 42 class _SCREAMING_SNAKE_CASE ( _A ): a_ : Union[str, Any] = 42 class _SCREAMING_SNAKE_CASE ( _A ): @staticmethod def A__ (UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''') serve_parser.add_argument( '''--task''' , type=UpperCamelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=UpperCamelCase__ , default='''localhost''' , help='''Interface the server will listen on.''') serve_parser.add_argument('''--port''' , type=UpperCamelCase__ , default=8_8_8_8 , help='''Port the serving will listen to.''') serve_parser.add_argument('''--workers''' , type=UpperCamelCase__ , default=1 , help='''Number of http workers''') serve_parser.add_argument('''--model''' , type=UpperCamelCase__ , help='''Model\'s name or path to stored model.''') serve_parser.add_argument('''--config''' , type=UpperCamelCase__ , help='''Model\'s config name or path to stored model.''') serve_parser.add_argument('''--tokenizer''' , type=UpperCamelCase__ , help='''Tokenizer name to use.''') serve_parser.add_argument( '''--device''' , type=UpperCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=UpperCamelCase__) def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =pipeline __UpperCAmelCase =host __UpperCAmelCase =port __UpperCAmelCase =workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. 
''' '''Please install transformers with [serving]: pip install \"transformers[serving]\".''' '''Or install FastAPI and uvicorn separately.''') else: logger.info(f"""Serving model over {host}:{port}""") __UpperCAmelCase =FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''POST'''] , ), ] , timeout=6_0_0 , ) def A__ (self): '''simple docstring''' run(self._app , host=self.host , port=self.port , workers=self.workers) def A__ (self): '''simple docstring''' return ServeModelInfoResult(infos=vars(self._pipeline.model.config)) def A__ (self , UpperCAmelCase = Body(UpperCamelCase__ , embed=UpperCamelCase__) , UpperCAmelCase = Body(UpperCamelCase__ , embed=UpperCamelCase__)): '''simple docstring''' try: __UpperCAmelCase =self._pipeline.tokenizer.tokenize(UpperCamelCase__) if return_ids: __UpperCAmelCase =self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase__) return ServeTokenizeResult(tokens=UpperCamelCase__ , tokens_ids=UpperCamelCase__) else: return ServeTokenizeResult(tokens=UpperCamelCase__) except Exception as e: raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(UpperCamelCase__)}) def A__ (self , UpperCAmelCase = Body(UpperCamelCase__ , embed=UpperCamelCase__) , UpperCAmelCase = Body(UpperCamelCase__ , embed=UpperCamelCase__) , UpperCAmelCase = Body(UpperCamelCase__ , embed=UpperCamelCase__) , ): '''simple docstring''' try: __UpperCAmelCase =self._pipeline.tokenizer.decode(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return ServeDeTokenizeResult(model='''''' , text=UpperCamelCase__) except Exception as e: raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(UpperCamelCase__)}) async def A__ (self , UpperCAmelCase=Body(UpperCamelCase__ , embed=UpperCamelCase__)): '''simple docstring''' if len(UpperCamelCase__) == 0: return ServeForwardResult(output=[] , attention=[]) try: # Forward through the model __UpperCAmelCase =self._pipeline(UpperCamelCase__) return ServeForwardResult(output=UpperCamelCase__) except Exception as e: raise HTTPException(5_0_0 , {'''error''': str(UpperCamelCase__)})
710
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def A__ (self): '''simple docstring''' __UpperCAmelCase ='''ylacombe/bark-small''' __UpperCAmelCase =tempfile.mkdtemp() __UpperCAmelCase ='''en_speaker_1''' __UpperCAmelCase ='''This is a test string''' __UpperCAmelCase ='''speaker_embeddings_path.json''' __UpperCAmelCase ='''speaker_embeddings''' def A__ (self , **UpperCAmelCase): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase) def A__ (self): '''simple docstring''' shutil.rmtree(self.tmpdirname) def A__ (self): '''simple docstring''' __UpperCAmelCase =self.get_tokenizer() __UpperCAmelCase =BarkProcessor(tokenizer=UpperCAmelCase) processor.save_pretrained(self.tmpdirname) __UpperCAmelCase =BarkProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) @slow def A__ (self): '''simple docstring''' __UpperCAmelCase =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) __UpperCAmelCase =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') __UpperCAmelCase =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) def A__ (self): '''simple docstring''' __UpperCAmelCase =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) __UpperCAmelCase =3_5 __UpperCAmelCase =2 __UpperCAmelCase =8 __UpperCAmelCase ={ '''semantic_prompt''': np.ones(UpperCAmelCase), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len)), } # test providing already loaded voice_preset __UpperCAmelCase =processor(text=self.input_string , voice_preset=UpperCAmelCase) __UpperCAmelCase =inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase , np.array([])).tolist()) # test loading voice preset from npz file __UpperCAmelCase =os.path.join(self.tmpdirname , '''file.npz''') np.savez(UpperCAmelCase , **UpperCAmelCase) __UpperCAmelCase =processor(text=self.input_string , voice_preset=UpperCAmelCase) __UpperCAmelCase =inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase , np.array([])).tolist()) # test loading voice preset from the hub __UpperCAmelCase =processor(text=self.input_string , voice_preset=self.voice_preset) def A__ (self): '''simple docstring''' __UpperCAmelCase =self.get_tokenizer() __UpperCAmelCase =BarkProcessor(tokenizer=UpperCAmelCase) __UpperCAmelCase =processor(text=self.input_string) __UpperCAmelCase =tokenizer( self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
142
0
'''simple docstring'''
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def __lowerCAmelCase ( UpperCamelCase__ ) -> int:
    if "model" in orig_key:
        __lowerCamelCase = orig_key.replace('''model.''' , '''''' )
    if "norm1" in orig_key:
        __lowerCamelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        __lowerCamelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
    if "norm" in orig_key:
        __lowerCamelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
    if "transformer" in orig_key:
        __lowerCamelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
        __lowerCamelCase = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        __lowerCamelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
    if "mha" in orig_key:
        __lowerCamelCase = orig_key.replace('''mha''' , '''attention''' )
    if "W_q" in orig_key:
        __lowerCamelCase = orig_key.replace('''W_q''' , '''self.query''' )
    if "W_k" in orig_key:
        __lowerCamelCase = orig_key.replace('''W_k''' , '''self.key''' )
    if "W_v" in orig_key:
        __lowerCamelCase = orig_key.replace('''W_v''' , '''self.value''' )
    if "ff1" in orig_key:
        __lowerCamelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
    if "ff2" in orig_key:
        __lowerCamelCase = orig_key.replace('''ff2''' , '''output.dense''' )
    if "ff" in orig_key:
        __lowerCamelCase = orig_key.replace('''ff''' , '''output.dense''' )
    if "mlm_class" in orig_key:
        __lowerCamelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        __lowerCamelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
    if "cls" not in orig_key:
        __lowerCamelCase = '''yoso.''' + orig_key
    return orig_key


def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
    for key in orig_state_dict.copy().keys():
        __lowerCamelCase = orig_state_dict.pop(UpperCamelCase__ )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            __lowerCamelCase = val
    __lowerCamelCase = orig_state_dict['''cls.predictions.decoder.bias''']
    __lowerCamelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
    return orig_state_dict


def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
    __lowerCamelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
    __lowerCamelCase = YosoConfig.from_json_file(UpperCamelCase__ )
    __lowerCamelCase = YosoForMaskedLM(UpperCamelCase__ )
    __lowerCamelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
    print(model.load_state_dict(UpperCamelCase__ ) )
    model.eval()
    model.save_pretrained(UpperCamelCase__ )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )


if __name__ == "__main__":
    __UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __UpperCAmelCase = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
546
'''simple docstring'''


def __lowerCAmelCase ( UpperCamelCase__ ) -> float:
    __lowerCamelCase = 0
    while len(UpperCamelCase__ ) > 1:
        __lowerCamelCase = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            __lowerCamelCase = files.index(min(UpperCamelCase__ ) )
            temp += files[min_index]
            files.pop(UpperCamelCase__ )
        files.append(UpperCamelCase__ )
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
546
1
def lowerCamelCase ( UpperCAmelCase_ : list[list[float]] )-> list[list[float]]:
    """simple docstring"""
    a =[]
    for data in source_data:
        for i, el in enumerate(UpperCAmelCase_ ):
            if len(UpperCAmelCase_ ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(UpperCAmelCase_ ) )
    return data_lists


def lowerCamelCase ( UpperCAmelCase_ : list[list[float]] , UpperCAmelCase_ : list[int] )-> list[list[float]]:
    """simple docstring"""
    a =[]
    for dlist, weight in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
        a =min(UpperCAmelCase_ )
        a =max(UpperCAmelCase_ )
        a =[]
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            a =F'''Invalid weight of {weight:f} provided'''
            raise ValueError(UpperCAmelCase_ )
        score_lists.append(UpperCAmelCase_ )
    return score_lists


def lowerCamelCase ( UpperCAmelCase_ : list[list[float]] )-> list[float]:
    """simple docstring"""
    a =[0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(UpperCAmelCase_ ):
            a =final_scores[j] + ele
    return final_scores


def lowerCamelCase ( UpperCAmelCase_ : list[list[float]] , UpperCAmelCase_ : list[int] )-> list[list[float]]:
    """simple docstring"""
    a =get_data(UpperCAmelCase_ )
    a =calculate_each_score(UpperCAmelCase_ , UpperCAmelCase_ )
    a =generate_final_scores(UpperCAmelCase_ )
    # append scores to source data
    for i, ele in enumerate(UpperCAmelCase_ ):
        source_data[i].append(UpperCAmelCase_ )
    return source_data
321
from __future__ import annotations


def lowerCamelCase ( UpperCAmelCase_ : int | str )-> bool:
    """simple docstring"""
    a =str(UpperCAmelCase_ )
    return n == n[::-1]


def lowerCamelCase ( UpperCAmelCase_ : int = 100_0000 )-> Optional[int]:
    """simple docstring"""
    a =0
    for i in range(1 , UpperCAmelCase_ ):
        if is_palindrome(UpperCAmelCase_ ) and is_palindrome(bin(UpperCAmelCase_ ).split("""b""" )[1] ):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
321
1
'''simple docstring'''
import math

__magic_name__ : List[Any] = 10
__magic_name__ : str = 7
__magic_name__ : Optional[int] = BALLS_PER_COLOUR * NUM_COLOURS


def A__ ( A_ = 20 ) -> str:
    _lowercase = math.comb(A_ , A_ )
    _lowercase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , A_ )
    _lowercase = NUM_COLOURS * (1 - missing_colour / total)
    return F"""{result:.9f}"""


if __name__ == "__main__":
    print(solution(20))
497
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : Any , __A : Union[str, Any]=1_3 , __A : int=7 , __A : Optional[int]=True , __A : Union[str, Any]=True , __A : Any=9_9 , __A : List[str]=3_2 , __A : Dict=5 , __A : List[str]=4 , __A : List[Any]=3_7 , __A : Any="gelu" , __A : Any=0.1 , __A : Tuple=0.1 , __A : Optional[int]=5_0 , __A : Union[str, Any]=0.0_2 , __A : Optional[Any]=True , __A : Dict=None , ): """simple docstring""" _lowercase = parent _lowercase = batch_size _lowercase = seq_length _lowercase = is_training _lowercase = use_input_mask _lowercase = vocab_size _lowercase = hidden_size _lowercase = num_hidden_layers _lowercase = num_attention_heads _lowercase = intermediate_size _lowercase = hidden_act _lowercase = hidden_dropout_prob _lowercase = attention_probs_dropout_prob _lowercase = max_position_embeddings _lowercase = initializer_range _lowercase = use_labels _lowercase = scope def snake_case ( self : List[str] ): """simple docstring""" _lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase = None if self.use_input_mask: _lowercase = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: _lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase = self.get_config() return config, input_ids, input_mask, token_labels def snake_case ( self : Union[str, Any] ): """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__A , initializer_range=self.initializer_range , ) def snake_case ( self : str ): """simple docstring""" ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) = self.prepare_config_and_inputs() _lowercase = True _lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case ( self : str , __A : str , __A : Optional[Any] , __A : int , __A : int , **__A : Union[str, Any] , ): """simple docstring""" _lowercase = BertGenerationEncoder(config=__A ) model.to(__A ) model.eval() _lowercase = model(__A , attention_mask=__A ) _lowercase = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Dict , __A : int , __A : List[Any] , __A : List[Any] , __A : str , __A : List[Any] , __A : Optional[Any] , **__A : List[Any] , ): """simple docstring""" _lowercase = True _lowercase = 
BertGenerationEncoder(config=__A ) model.to(__A ) model.eval() _lowercase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , ) _lowercase = model( __A , attention_mask=__A , encoder_hidden_states=__A , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Union[str, Any] , __A : List[Any] , __A : List[Any] , __A : Optional[int] , __A : Any , __A : Union[str, Any] , __A : List[str] , **__A : str , ): """simple docstring""" _lowercase = True _lowercase = True _lowercase = BertGenerationDecoder(config=__A ).to(__A ).eval() # first forward pass _lowercase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , use_cache=__A , ) _lowercase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowercase = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowercase = torch.cat([input_mask, next_mask] , dim=-1 ) _lowercase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_hidden_states=__A , )["hidden_states"][0] _lowercase = model( __A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )["hidden_states"][0] # select random slice _lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowercase = output_from_no_past[:, -3:, random_slice_idx].detach() _lowercase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-3 ) ) def snake_case ( self : List[Any] , __A : Tuple , __A : Optional[int] , __A : Dict , __A : Optional[int] , *__A : Optional[int] , ): """simple docstring""" _lowercase = BertGenerationDecoder(__A ) model.to(__A ) model.eval() _lowercase = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : List[str] ): """simple docstring""" _lowercase , _lowercase , _lowercase , _lowercase = self.prepare_config_and_inputs() _lowercase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def snake_case ( self : Tuple ): """simple docstring""" _lowercase = BertGenerationEncoderTester(self ) _lowercase = ConfigTester(self , config_class=__A , hidden_size=3_7 ) def snake_case ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self : Optional[Any] ): """simple docstring""" _lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def snake_case ( self : Dict ): """simple docstring""" _lowercase , _lowercase , _lowercase , _lowercase = 
self.model_tester.prepare_config_and_inputs() _lowercase = "bert" self.model_tester.create_and_check_model(__A , __A , __A , __A ) def snake_case ( self : List[str] ): """simple docstring""" _lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__A ) def snake_case ( self : Optional[Any] ): """simple docstring""" _lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A ) def snake_case ( self : str ): """simple docstring""" # This regression test was failing with PyTorch < 1.3 ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() _lowercase = None self.model_tester.create_and_check_model_as_decoder( __A , __A , __A , __A , __A , __A , ) def snake_case ( self : List[str] ): """simple docstring""" _lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__A ) @slow def snake_case ( self : List[str] ): """simple docstring""" _lowercase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(__A ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self : int ): """simple docstring""" _lowercase = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) _lowercase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): _lowercase = model(__A )[0] _lowercase = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , __A ) _lowercase = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1e-4 ) ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self : Any ): """simple docstring""" _lowercase = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) _lowercase = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): _lowercase = model(__A )[0] _lowercase = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , __A ) _lowercase = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1e-4 ) )
497
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase__ = logging.get_logger(__name__) class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase_ : Dict = 'vision-encoder-decoder' UpperCamelCase_ : List[str] = True def __init__( self : Optional[Any] , **lowerCamelCase__ : List[str] ) -> Dict: """simple docstring""" super().__init__(**lowerCamelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'A configuraton of type {self.model_type} cannot be instantiated because ' f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' ) __lowercase = kwargs.pop('''encoder''' ) __lowercase = encoder_config.pop('''model_type''' ) __lowercase = kwargs.pop('''decoder''' ) __lowercase = decoder_config.pop('''model_type''' ) __lowercase = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ ) __lowercase = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ ) __lowercase = True @classmethod def UpperCAmelCase_ ( cls : List[str] , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : PretrainedConfig , **lowerCamelCase__ : Union[str, Any] ) -> PretrainedConfig: """simple docstring""" logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) __lowercase = True __lowercase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.encoder.to_dict() __lowercase = self.decoder.to_dict() __lowercase = self.__class__.model_type return output class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase_ : Tuple = version.parse('1.11' ) @property def UpperCAmelCase_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase_ ( self : List[str] ) -> float: """simple docstring""" return 1e-4 @property def UpperCAmelCase_ ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" @property def UpperCAmelCase_ ( self : str ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __lowercase = OrderedDict() __lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} __lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} __lowercase = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def UpperCAmelCase_ ( self : str , lowerCamelCase__ : "PreTrainedTokenizerBase" , lowerCamelCase__ : int = -1 , lowerCamelCase__ : int = -1 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]: """simple docstring""" import torch __lowercase = OrderedDict() __lowercase = super().generate_dummy_inputs( lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ ) __lowercase , __lowercase = 
dummy_input['''input_ids'''].shape __lowercase = (batch, encoder_sequence, self._config.encoder_hidden_size) __lowercase = dummy_input.pop('''input_ids''' ) __lowercase = dummy_input.pop('''attention_mask''' ) __lowercase = torch.zeros(lowerCamelCase__ ) return common_inputs class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" @property def UpperCAmelCase_ ( self : Any ) -> None: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : PretrainedConfig ) -> OnnxConfig: """simple docstring""" return VisionEncoderDecoderEncoderOnnxConfig(lowerCamelCase__ ) def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : str = "default" ) -> OnnxConfig: """simple docstring""" __lowercase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(lowerCamelCase__ , lowerCamelCase__ )
362
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: UpperCAmelCase__ = None UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } UpperCAmelCase__ = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } # fmt: off UpperCAmelCase__ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase_ : int = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[int] = ['input_ids', 'attention_mask'] UpperCamelCase_ : Optional[int] = MBartTokenizer UpperCamelCase_ : List[int] = [] UpperCamelCase_ : List[int] = [] def __init__( self : Tuple , lowerCamelCase__ : str=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : int="</s>" , lowerCamelCase__ : int="</s>" , lowerCamelCase__ : List[Any]="<s>" , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Union[str, Any]="<pad>" , lowerCamelCase__ : List[Any]="<mask>" , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : List[str] , ) -> Tuple: """simple docstring""" __lowercase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token super().__init__( vocab_file=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) __lowercase = vocab_file __lowercase = False if not self.vocab_file else True __lowercase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __lowercase = { lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __lowercase = src_lang if src_lang is not None else '''en_XX''' __lowercase = self.convert_tokens_to_ids(self._src_lang ) __lowercase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : str ) -> None: """simple docstring""" __lowercase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] , lowerCamelCase__ : Optional[str] , **lowerCamelCase__ : List[str] ) -> Optional[int]: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __lowercase = src_lang __lowercase = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) __lowercase = self.convert_tokens_to_ids(lowerCamelCase__ ) __lowercase = tgt_lang_id return inputs def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str = "en_XX" , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : str = "ro_RO" , **lowerCamelCase__ : Union[str, Any] , ) -> BatchEncoding: """simple docstring""" __lowercase = src_lang __lowercase = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) def UpperCAmelCase_ ( self : int ) -> Dict: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Optional[Any] ) -> None: """simple docstring""" __lowercase = self.convert_tokens_to_ids(lowerCamelCase__ ) __lowercase = [] __lowercase = [self.eos_token_id, self.cur_lang_code] __lowercase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowercase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowercase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self : Tuple , 
lowerCamelCase__ : str ) -> None: """simple docstring""" __lowercase = self.convert_tokens_to_ids(lowerCamelCase__ ) __lowercase = [] __lowercase = [self.eos_token_id, self.cur_lang_code] __lowercase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowercase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowercase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCamelCase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return __lowercase = os.path.join( lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file , lowerCamelCase__ ) return (out_vocab_file,)
362
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class UpperCAmelCase__ ( A_ ): '''simple docstring''' UpperCAmelCase_ = '''bert''' def __init__( self : Dict , UpperCamelCase : int=3_05_22 , UpperCamelCase : str=7_68 , UpperCamelCase : Optional[int]=12 , UpperCamelCase : Any=12 , UpperCamelCase : Union[str, Any]=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : List[str]=5_12 , UpperCamelCase : str=2 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : int=1E-12 , UpperCamelCase : List[str]=0 , UpperCamelCase : Optional[Any]="absolute" , 
UpperCamelCase : str=True , UpperCamelCase : str=None , **UpperCamelCase : Union[str, Any] , ): """simple docstring""" super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) _lowercase : Any = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : List[Any] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Any = hidden_act _lowercase : List[Any] = intermediate_size _lowercase : Tuple = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : int = initializer_range _lowercase : int = layer_norm_eps _lowercase : str = position_embedding_type _lowercase : Tuple = use_cache _lowercase : Tuple = classifier_dropout class UpperCAmelCase__ ( A_ ): '''simple docstring''' @property def lowerCAmelCase_ ( self : Dict ): """simple docstring""" if self.task == "multiple-choice": _lowercase : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _lowercase : str = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
322
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a ring of doubly linked nodes with a fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
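# Usage sketch for the queue reconstructed above: a ring of `initial_capacity`
# nodes, so a fourth enqueue here would raise "Full Queue" and reads on an
# empty queue raise "Empty Queue".
queue = CircularQueueLinkedList(initial_capacity=3)
queue.enqueue("a")
queue.enqueue("b")
assert queue.first() == "a"
assert queue.dequeue() == "a"
assert queue.dequeue() == "b"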
322
1
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # A string can be rearranged into a palindrome iff at most one character
    # has an odd frequency; Counter makes that a one-liner.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
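# Quick checks of the two equivalent implementations above: "Momo" lowercases
# to two even character counts (True), while "Mother" has six odd counts (False).
assert can_string_be_rearranged_as_palindrome_counter("Momo") is True
assert can_string_be_rearranged_as_palindrome("Mother") is False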
515
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    # Returns the next number of the chain: the sum of the squares of the digits.
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True    # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    # Counts how many starting values below `number` have a chain ending at 89.
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{solution() = }""")
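# Worked example for the chain logic above, assuming the reconstruction is
# faithful to the original: 44 -> 32 -> 13 -> 10 -> 1 ends at 1, while
# 85 -> 89 ends at 89, so chain() classifies them True/False respectively.
assert next_number(44) == 32  # 4^2 + 4^2
assert chain(44) is True
assert chain(85) is False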
515
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class SCREAMING_SNAKE_CASE_ ( lowerCamelCase_ ): """simple docstring""" __snake_case : str = """dpt""" def __init__( self :Optional[Any] , __lowercase :int=768 , __lowercase :Optional[Any]=12 , __lowercase :Optional[int]=12 , __lowercase :int=3072 , __lowercase :Optional[int]="gelu" , __lowercase :List[Any]=0.0 , __lowercase :Union[str, Any]=0.0 , __lowercase :Optional[int]=0.02 , __lowercase :Tuple=1e-1_2 , __lowercase :Optional[Any]=384 , __lowercase :Optional[Any]=16 , __lowercase :str=3 , __lowercase :Dict=False , __lowercase :Optional[int]=True , __lowercase :str=[2, 5, 8, 11] , __lowercase :int="project" , __lowercase :str=[4, 2, 1, 0.5] , __lowercase :Union[str, Any]=[96, 192, 384, 768] , __lowercase :Dict=256 , __lowercase :Tuple=-1 , __lowercase :Tuple=False , __lowercase :List[str]=True , __lowercase :str=0.4 , __lowercase :str=255 , __lowercase :Any=0.1 , __lowercase :Optional[int]=[1, 1024, 24, 24] , __lowercase :Tuple=[0, 1] , __lowercase :Optional[int]=None , **__lowercase :str , ): super().__init__(**__lowercase ) __lowerCamelCase : Optional[int] =hidden_size __lowerCamelCase : Union[str, Any] =is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) __lowerCamelCase : Optional[Any] ={ "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } __lowerCamelCase : str =BitConfig(**__lowercase ) elif isinstance(__lowercase , __lowercase ): logger.info('''Initializing the config with a `BiT` backbone.''' ) __lowerCamelCase : List[Any] =BitConfig(**__lowercase ) elif isinstance(__lowercase , __lowercase ): __lowerCamelCase : str =backbone_config else: raise ValueError( f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' 
) __lowerCamelCase : Union[str, Any] =backbone_featmap_shape __lowerCamelCase : str =neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: __lowerCamelCase : Tuple =None __lowerCamelCase : Any =None __lowerCamelCase : Tuple =[] __lowerCamelCase : str =num_hidden_layers __lowerCamelCase : Dict =num_attention_heads __lowerCamelCase : Optional[int] =intermediate_size __lowerCamelCase : Any =hidden_act __lowerCamelCase : Any =hidden_dropout_prob __lowerCamelCase : str =attention_probs_dropout_prob __lowerCamelCase : Any =initializer_range __lowerCamelCase : str =layer_norm_eps __lowerCamelCase : List[Any] =image_size __lowerCamelCase : int =patch_size __lowerCamelCase : Any =num_channels __lowerCamelCase : int =qkv_bias __lowerCamelCase : Optional[int] =backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) __lowerCamelCase : Union[str, Any] =readout_type __lowerCamelCase : str =reassemble_factors __lowerCamelCase : Any =neck_hidden_sizes __lowerCamelCase : Dict =fusion_hidden_size __lowerCamelCase : Any =head_in_index __lowerCamelCase : Any =use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __lowerCamelCase : Union[str, Any] =use_auxiliary_head __lowerCamelCase : str =auxiliary_loss_weight __lowerCamelCase : Dict =semantic_loss_ignore_index __lowerCamelCase : Optional[Any] =semantic_classifier_dropout def __lowercase ( self :Optional[Any] ): __lowerCamelCase : List[str] =copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __lowerCamelCase : List[Any] =self.backbone_config.to_dict() __lowerCamelCase : int =self.__class__.model_type return output
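# Usage sketch, assuming the class above mirrors transformers.DPTConfig:
# passing is_hybrid=True with no backbone_config triggers the BiT backbone
# defaults initialized in __init__ above.
from transformers import DPTConfig

config = DPTConfig(is_hybrid=True)
assert type(config.backbone_config).__name__ == "BitConfig"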
179
'''simple docstring''' import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): lowerCAmelCase_ = True from torch.cuda.amp import autocast lowerCAmelCase_ = logging.getLogger(__name__) def A__ ( A : str=None , A : Union[str, Any]=None): '''simple docstring''' return field(default_factory=lambda: default , metadata=A) @dataclass class UpperCAmelCase_ : """simple docstring""" __SCREAMING_SNAKE_CASE = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={ '''help''': '''The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.''' } , ) __SCREAMING_SNAKE_CASE = field( default=0.1 , metadata={'''help''': '''The dropout probabilitiy for all 1D convolutional layers in feature extractor.'''} , ) __SCREAMING_SNAKE_CASE = field( default=0.05 , metadata={ '''help''': ( '''Propability of each feature vector along the time axis to be chosen as the start of the vector''' '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature''' '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.''' ) } , ) __SCREAMING_SNAKE_CASE = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} ) @dataclass class UpperCAmelCase_ : """simple docstring""" __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __SCREAMING_SNAKE_CASE = field( default='''train+validation''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). 
Defaults to \'train\'''' } , ) __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __SCREAMING_SNAKE_CASE = field( default=lowerCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of validation examples to this ''' '''value if set.''' ) } , ) __SCREAMING_SNAKE_CASE = list_field( default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , ) @dataclass class UpperCAmelCase_ : """simple docstring""" __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __call__( self , lowerCamelCase ) -> Dict[str, torch.Tensor]: '''simple docstring''' UpperCamelCase : Dict = [{"input_values": feature["input_values"]} for feature in features] UpperCamelCase : str = [{"input_ids": feature["labels"]} for feature in features] UpperCamelCase : Optional[Any] = self.processor.pad( lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) UpperCamelCase : Union[str, Any] = self.processor.pad( labels=lowerCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , ) # replace padding with -100 to ignore loss correctly UpperCamelCase : int = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 ) UpperCamelCase : Any = labels return batch class UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> torch.Tensor: '''simple docstring''' model.train() UpperCamelCase : List[Any] = self._prepare_inputs(lowerCamelCase ) if self.use_amp: with autocast(): UpperCamelCase : Union[str, Any] = self.compute_loss(lowerCamelCase , lowerCamelCase ) else: UpperCamelCase : List[str] = self.compute_loss(lowerCamelCase , lowerCamelCase ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": UpperCamelCase : Optional[Any] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCamelCase : str = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: UpperCamelCase : List[Any] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowerCamelCase ).backward() elif self.use_apex: with amp.scale_loss(lowerCamelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowerCamelCase ) else: loss.backward() return loss.detach() def A__ ( ): '''simple docstring''' UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. UpperCamelCase : Any = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase : Dict = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome.") elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.") # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''') # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , A) # Set seed before initializing model. 
set_seed(training_args.seed) # Get the datasets: UpperCamelCase : Union[str, Any] = datasets.load_dataset( "common_voice" , data_args.dataset_config_name , split=data_args.train_split_name) UpperCamelCase : Optional[int] = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test") # Create and save tokenizer UpperCamelCase : List[str] = F'''[{''.join(data_args.chars_to_ignore)}]''' def remove_special_characters(A : List[Any]): UpperCamelCase : Optional[int] = re.sub(A , "" , batch["sentence"]).lower() + " " return batch UpperCamelCase : Any = train_dataset.map(A , remove_columns=["sentence"]) UpperCamelCase : int = eval_dataset.map(A , remove_columns=["sentence"]) def extract_all_chars(A : Union[str, Any]): UpperCamelCase : Tuple = " ".join(batch["text"]) UpperCamelCase : Optional[Any] = list(set(A)) return {"vocab": [vocab], "all_text": [all_text]} UpperCamelCase : Tuple = train_dataset.map( A , batched=A , batch_size=-1 , keep_in_memory=A , remove_columns=train_dataset.column_names , ) UpperCamelCase : Optional[Any] = train_dataset.map( A , batched=A , batch_size=-1 , keep_in_memory=A , remove_columns=eval_dataset.column_names , ) UpperCamelCase : Dict = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0])) UpperCamelCase : Tuple = {v: k for k, v in enumerate(A)} UpperCamelCase : Tuple = vocab_dict[" "] del vocab_dict[" "] UpperCamelCase : List[str] = len(A) UpperCamelCase : Dict = len(A) with open("vocab.json" , "w") as vocab_file: json.dump(A , A) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase : int = WavaVecaCTCTokenizer( "vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , ) UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=A , return_attention_mask=A) UpperCamelCase : int = WavaVecaProcessor(feature_extractor=A , tokenizer=A) UpperCamelCase : str = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer) , ) if data_args.max_train_samples is not None: UpperCamelCase : Union[str, Any] = min(len(A) , data_args.max_train_samples) UpperCamelCase : int = train_dataset.select(range(A)) if data_args.max_val_samples is not None: UpperCamelCase : Dict = eval_dataset.select(range(data_args.max_val_samples)) UpperCamelCase : Union[str, Any] = torchaudio.transforms.Resample(4_80_00 , 1_60_00) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. 
def speech_file_to_array_fn(A : Union[str, Any]): UpperCamelCase , UpperCamelCase : List[str] = torchaudio.load(batch["path"]) UpperCamelCase : List[str] = resampler(A).squeeze().numpy() UpperCamelCase : Dict = 1_60_00 UpperCamelCase : str = batch["text"] return batch UpperCamelCase : int = train_dataset.map( A , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) UpperCamelCase : int = eval_dataset.map( A , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(A : Dict): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.''' UpperCamelCase : Union[str, Any] = processor( audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0]) batch.update(A) return batch UpperCamelCase : str = train_dataset.map( A , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A , num_proc=data_args.preprocessing_num_workers , ) UpperCamelCase : Union[str, Any] = eval_dataset.map( A , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A , num_proc=data_args.preprocessing_num_workers , ) # Metric UpperCamelCase : Tuple = datasets.load_metric("wer") def compute_metrics(A : int): UpperCamelCase : Union[str, Any] = pred.predictions UpperCamelCase : Tuple = np.argmax(A , axis=-1) UpperCamelCase : int = processor.tokenizer.pad_token_id UpperCamelCase : Union[str, Any] = processor.batch_decode(A) # we do not want to group tokens when computing the metrics UpperCamelCase : List[Any] = processor.batch_decode(pred.label_ids , group_tokens=A) UpperCamelCase : Optional[Any] = wer_metric.compute(predictions=A , references=A) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator UpperCamelCase : Dict = DataCollatorCTCWithPadding(processor=A , padding=A) # Initialize our Trainer UpperCamelCase : int = CTCTrainer( model=A , data_collator=A , args=A , compute_metrics=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: UpperCamelCase : List[Any] = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): UpperCamelCase : Tuple = model_args.model_name_or_path else: UpperCamelCase : str = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank): processor.save_pretrained(training_args.output_dir) UpperCamelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=A) trainer.save_model() UpperCamelCase : int = train_result.metrics UpperCamelCase : int = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A) ) UpperCamelCase : int = min(A , len(A)) trainer.log_metrics("train" , A) trainer.save_metrics("train" , A) trainer.save_state() # Evaluation UpperCamelCase : int = {} if training_args.do_eval: logger.info("*** Evaluate ***") UpperCamelCase : Optional[Any] = trainer.evaluate() UpperCamelCase : int = data_args.max_val_samples if data_args.max_val_samples is not None else len(A) UpperCamelCase : Dict = min(A , len(A)) trainer.log_metrics("eval" , A) trainer.save_metrics("eval" , A) return results if __name__ == 
"__main__": main()
173
0
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
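# Hypothetical follow-up to the conversion script above: the saved pipeline can
# be reloaded from the dump path ("path/to/dump" stands in for args.dump_path).
from diffusers import UnCLIPImageVariationPipeline

img2img = UnCLIPImageVariationPipeline.from_pretrained("path/to/dump")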
709
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _a( __A , unittest.TestCase ): lowerCamelCase__ :Optional[int] = AudioLDMPipeline lowerCamelCase__ :List[Any] = TEXT_TO_AUDIO_PARAMS lowerCamelCase__ :List[Any] = TEXT_TO_AUDIO_BATCH_PARAMS lowerCamelCase__ :Dict = frozenset( [ 'num_inference_steps', 'num_waveforms_per_prompt', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) def lowercase ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) _snake_case : Union[str, Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(3_2, 6_4) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=__snake_case , ) _snake_case : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0 ) _snake_case : Dict = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _snake_case : Tuple = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , ) _snake_case : Optional[int] = ClapTextModelWithProjection(__snake_case ) _snake_case : Optional[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=7_7 ) _snake_case : Dict = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__snake_case , ) _snake_case : Dict = SpeechTaHifiGan(__snake_case ) _snake_case : Dict = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def lowercase ( self , __snake_case , __snake_case=0 ) -> Union[str, Any]: '''simple docstring''' if str(__snake_case ).startswith("mps" ): _snake_case : List[Any] = torch.manual_seed(__snake_case ) else: _snake_case : Tuple = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _snake_case : List[str] = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def lowercase ( self ) -> Any: '''simple docstring''' _snake_case : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator 
_snake_case : Union[str, Any] = self.get_dummy_components() _snake_case : Any = AudioLDMPipeline(**__snake_case ) _snake_case : List[str] = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : List[str] = self.get_dummy_inputs(__snake_case ) _snake_case : Any = audioldm_pipe(**__snake_case ) _snake_case : Tuple = output.audios[0] assert audio.ndim == 1 assert len(__snake_case ) == 2_5_6 _snake_case : Any = audio[:1_0] _snake_case : Dict = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def lowercase ( self ) -> Tuple: '''simple docstring''' _snake_case : Tuple = self.get_dummy_components() _snake_case : Any = AudioLDMPipeline(**__snake_case ) _snake_case : Union[str, Any] = audioldm_pipe.to(__snake_case ) _snake_case : List[str] = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : Optional[int] = self.get_dummy_inputs(__snake_case ) _snake_case : List[str] = 3 * [inputs["prompt"]] # forward _snake_case : Dict = audioldm_pipe(**__snake_case ) _snake_case : Union[str, Any] = output.audios[0] _snake_case : List[str] = self.get_dummy_inputs(__snake_case ) _snake_case : Tuple = 3 * [inputs.pop("prompt" )] _snake_case : int = audioldm_pipe.tokenizer( __snake_case , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__snake_case , return_tensors="pt" , ) _snake_case : Tuple = text_inputs["input_ids"].to(__snake_case ) _snake_case : Dict = audioldm_pipe.text_encoder( __snake_case , ) _snake_case : Tuple = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _snake_case : List[Any] = F.normalize(__snake_case , dim=-1 ) _snake_case : Union[str, Any] = prompt_embeds # forward _snake_case : int = audioldm_pipe(**__snake_case ) _snake_case : Dict = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def lowercase ( self ) -> List[str]: '''simple docstring''' _snake_case : int = self.get_dummy_components() _snake_case : Optional[int] = AudioLDMPipeline(**__snake_case ) _snake_case : Tuple = audioldm_pipe.to(__snake_case ) _snake_case : Any = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : Tuple = self.get_dummy_inputs(__snake_case ) _snake_case : Union[str, Any] = 3 * ["this is a negative prompt"] _snake_case : int = negative_prompt _snake_case : Tuple = 3 * [inputs["prompt"]] # forward _snake_case : Tuple = audioldm_pipe(**__snake_case ) _snake_case : Optional[Any] = output.audios[0] _snake_case : List[str] = self.get_dummy_inputs(__snake_case ) _snake_case : Dict = 3 * [inputs.pop("prompt" )] _snake_case : List[Any] = [] for p in [prompt, negative_prompt]: _snake_case : Optional[Any] = audioldm_pipe.tokenizer( __snake_case , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__snake_case , return_tensors="pt" , ) _snake_case : List[Any] = text_inputs["input_ids"].to(__snake_case ) _snake_case : List[Any] = audioldm_pipe.text_encoder( __snake_case , ) _snake_case : Union[str, Any] = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _snake_case : int = F.normalize(__snake_case , dim=-1 ) embeds.append(__snake_case ) _snake_case , _snake_case : int = embeds # forward _snake_case : Any = audioldm_pipe(**__snake_case ) _snake_case : Dict = output.audios[0] assert np.abs(audio_a - 
audio_a ).max() < 1E-2 def lowercase ( self ) -> Optional[int]: '''simple docstring''' _snake_case : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Tuple = self.get_dummy_components() _snake_case : str = PNDMScheduler(skip_prk_steps=__snake_case ) _snake_case : Optional[int] = AudioLDMPipeline(**__snake_case ) _snake_case : Tuple = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : Tuple = self.get_dummy_inputs(__snake_case ) _snake_case : str = "egg cracking" _snake_case : int = audioldm_pipe(**__snake_case , negative_prompt=__snake_case ) _snake_case : Union[str, Any] = output.audios[0] assert audio.ndim == 1 assert len(__snake_case ) == 2_5_6 _snake_case : List[str] = audio[:1_0] _snake_case : Optional[int] = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def lowercase ( self ) -> Dict: '''simple docstring''' _snake_case : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : List[Any] = PNDMScheduler(skip_prk_steps=__snake_case ) _snake_case : Union[str, Any] = AudioLDMPipeline(**__snake_case ) _snake_case : int = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : List[Any] = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) _snake_case : Dict = audioldm_pipe(__snake_case , num_inference_steps=2 ).audios assert audios.shape == (1, 2_5_6) # test num_waveforms_per_prompt=1 (default) for batch of prompts _snake_case : int = 2 _snake_case : Optional[int] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_5_6) # test num_waveforms_per_prompt for single prompt _snake_case : List[Any] = 2 _snake_case : List[str] = audioldm_pipe(__snake_case , num_inference_steps=2 , num_waveforms_per_prompt=__snake_case ).audios assert audios.shape == (num_waveforms_per_prompt, 2_5_6) # test num_waveforms_per_prompt for batch of prompts _snake_case : Optional[int] = 2 _snake_case : str = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__snake_case ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6) def lowercase ( self ) -> int: '''simple docstring''' _snake_case : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : int = self.get_dummy_components() _snake_case : Union[str, Any] = AudioLDMPipeline(**__snake_case ) _snake_case : Any = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : Tuple = audioldm_pipe.vocoder.config.sampling_rate _snake_case : str = self.get_dummy_inputs(__snake_case ) _snake_case : List[Any] = audioldm_pipe(audio_length_in_s=0.0_16 , **__snake_case ) _snake_case : List[str] = output.audios[0] assert audio.ndim == 1 assert len(__snake_case ) / vocoder_sampling_rate == 0.0_16 _snake_case : Tuple = audioldm_pipe(audio_length_in_s=0.0_32 , **__snake_case ) _snake_case : Union[str, Any] = output.audios[0] assert audio.ndim == 1 assert len(__snake_case ) / vocoder_sampling_rate == 0.0_32 def lowercase ( self ) -> Optional[int]: '''simple docstring''' _snake_case : Union[str, Any] = self.get_dummy_components() _snake_case : Any = AudioLDMPipeline(**__snake_case ) 
_snake_case : List[str] = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : int = ["hey"] _snake_case : int = audioldm_pipe(__snake_case , num_inference_steps=1 ) _snake_case : List[Any] = output.audios.shape assert audio_shape == (1, 2_5_6) _snake_case : Optional[Any] = audioldm_pipe.vocoder.config config.model_in_dim *= 2 _snake_case : str = SpeechTaHifiGan(__snake_case ).to(__snake_case ) _snake_case : Any = audioldm_pipe(__snake_case , num_inference_steps=1 ) _snake_case : Tuple = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_5_6) def lowercase ( self ) -> Any: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__snake_case ) def lowercase ( self ) -> List[Any]: '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__snake_case ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowercase ( self ) -> Dict: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__snake_case ) @slow class _a( unittest.TestCase ): def lowercase ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase ( self , __snake_case , __snake_case="cpu" , __snake_case=torch.floataa , __snake_case=0 ) -> Any: '''simple docstring''' _snake_case : int = torch.Generator(device=__snake_case ).manual_seed(__snake_case ) _snake_case : Dict = np.random.RandomState(__snake_case ).standard_normal((1, 8, 1_2_8, 1_6) ) _snake_case : Any = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case ) _snake_case : List[Any] = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def lowercase ( self ) -> Dict: '''simple docstring''' _snake_case : List[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) _snake_case : str = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : Tuple = self.get_inputs(__snake_case ) _snake_case : Optional[Any] = 2_5 _snake_case : str = audioldm_pipe(**__snake_case ).audios[0] assert audio.ndim == 1 assert len(__snake_case ) == 8_1_9_2_0 _snake_case : Any = audio[7_7_2_3_0:7_7_2_4_0] _snake_case : str = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) _snake_case : List[str] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def lowercase ( self ) -> Union[str, Any]: '''simple docstring''' _snake_case : Tuple = AudioLDMPipeline.from_pretrained("cvssp/audioldm" ) _snake_case : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _snake_case : int = audioldm_pipe.to(__snake_case ) audioldm_pipe.set_progress_bar_config(disable=__snake_case ) _snake_case : Any = self.get_inputs(__snake_case ) _snake_case : Optional[Any] = audioldm_pipe(**__snake_case ).audios[0] assert audio.ndim == 1 assert len(__snake_case ) == 8_1_9_2_0 _snake_case : str = audio[2_7_7_8_0:2_7_7_9_0] _snake_case : Any = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] ) _snake_case : Tuple = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
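# Inference sketch for the pipeline under test, using the public cvssp/audioldm
# checkpoint that the slow tests above also load; parameter names follow the
# calls exercised in those tests.
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    audio_length_in_s=5.0,
).audios[0]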
278
0
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer _snake_case = logging.get_logger(__name__) _snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _snake_case = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } _snake_case = { '''allenai/led-base-16384''': 16_384, } class _lowerCAmelCase ( __magic_name__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Tuple =LEDTokenizer SCREAMING_SNAKE_CASE_ : List[str] =["input_ids", "attention_mask"] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Any="replace" , SCREAMING_SNAKE_CASE__ : str="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="</s>" , SCREAMING_SNAKE_CASE__ : str="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : List[str]="<pad>" , SCREAMING_SNAKE_CASE__ : str="<mask>" , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : List[Any]=True , **SCREAMING_SNAKE_CASE__ : str , ): """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: UpperCamelCase = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('type' ) ) UpperCamelCase = add_prefix_space UpperCamelCase = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) UpperCamelCase = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCamelCase = 'post_processor' UpperCamelCase = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if tokenizer_component_instance: UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase = tuple(state['sep'] ) if "cls" in state: UpperCamelCase = tuple(state['cls'] ) UpperCamelCase = False if state.get('add_prefix_space' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: UpperCamelCase = add_prefix_space UpperCamelCase = True if 
state.get('trim_offsets' , SCREAMING_SNAKE_CASE__ ) != trim_offsets: UpperCamelCase = trim_offsets UpperCamelCase = True if changes_to_apply: UpperCamelCase = getattr(SCREAMING_SNAKE_CASE__ , state.pop('type' ) ) UpperCamelCase = component_class(**SCREAMING_SNAKE_CASE__ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value UpperCamelCase = value def __lowerCAmelCase ( self : List[str] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" UpperCamelCase = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" UpperCamelCase = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.' 
) return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): """simple docstring""" UpperCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=None ): """simple docstring""" UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[Dict[str, EncodedInput], BatchEncoding] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs['global_attention_mask'] ) != len(SCREAMING_SNAKE_CASE__ ) if needs_to_be_padded: UpperCamelCase = len(SCREAMING_SNAKE_CASE__ ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
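# Sketch of the global_attention_mask padding convention enforced by _pad
# above: with right-side padding, -1 ("local attention") is appended once for
# each pad token added to the sequential inputs.
encoded = {"global_attention_mask": [1, 0, 0]}
difference = 2  # how many pad tokens were appended to input_ids
encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
assert encoded["global_attention_mask"] == [1, 0, 0, -1, -1]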
282
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
282
1
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class a ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self : Any , lowerCamelCase__ : TransformeraDModel , lowerCamelCase__ : AutoencoderKL , lowerCamelCase__ : KarrasDiffusionSchedulers , lowerCamelCase__ : Optional[Dict[int, str]] = None , ) -> Dict: """simple docstring""" super().__init__() self.register_modules(transformer=lowerCamelCase__ , vae=lowerCamelCase__ , scheduler=lowerCamelCase__ ) # create a imagenet -> id dictionary for easier use __lowercase = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''',''' ): __lowercase = int(lowerCamelCase__ ) __lowercase = dict(sorted(self.labels.items() ) ) def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : Union[str, List[str]] ) -> List[int]: """simple docstring""" if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowercase = list(lowerCamelCase__ ) for l in label: if l not in self.labels: raise ValueError( f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : float = 4.0 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" __lowercase = len(lowerCamelCase__ ) __lowercase = self.transformer.config.sample_size __lowercase = self.transformer.config.in_channels __lowercase = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCamelCase__ , device=self.device , dtype=self.transformer.dtype , ) __lowercase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents __lowercase = torch.tensor(lowerCamelCase__ , device=self.device ).reshape(-1 ) __lowercase = torch.tensor([1_000] * batch_size , device=self.device ) __lowercase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(lowerCamelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: __lowercase = latent_model_input[: len(lowerCamelCase__ ) // 2] __lowercase = torch.cat([half, half] , dim=0 ) __lowercase = self.scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ ) __lowercase = t if not torch.is_tensor(lowerCamelCase__ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __lowercase = latent_model_input.device.type == '''mps''' if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowercase = torch.floataa if is_mps else torch.floataa else: __lowercase = torch.intaa if is_mps else torch.intaa __lowercase = torch.tensor([timesteps] , dtype=lowerCamelCase__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: __lowercase = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __lowercase = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output __lowercase = self.transformer( lowerCamelCase__ , timestep=lowerCamelCase__ , class_labels=lowerCamelCase__ ).sample # perform guidance if guidance_scale > 1: __lowercase , __lowercase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __lowercase , __lowercase = torch.split(lowerCamelCase__ , len(lowerCamelCase__ ) // 2 , dim=0 ) __lowercase = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __lowercase = torch.cat([half_eps, half_eps] , dim=0 ) __lowercase = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __lowercase , __lowercase = torch.split(lowerCamelCase__ , lowerCamelCase__ , dim=1 ) else: __lowercase = noise_pred # compute previous image: x_t -> x_t-1 __lowercase = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample if guidance_scale > 1: __lowercase , __lowercase = latent_model_input.chunk(2 , dim=0 ) else: __lowercase = latent_model_input __lowercase = 1 / self.vae.config.scaling_factor * latents __lowercase = self.vae.decode(lowerCamelCase__ ).sample __lowercase = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __lowercase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(lowerCamelCase__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=lowerCamelCase__ )
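# Usage sketch, assuming the pipeline above matches diffusers.DiTPipeline with
# the facebook/DiT-XL-2-256 checkpoint (class-conditional ImageNet generation);
# get_label_ids is the upstream name of the label-lookup method shown above.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
class_ids = pipe.get_label_ids(["white shark"])
image = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images[0]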
700
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test that it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
362
0
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
386
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test for non-negative integers."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Search from factor * value for a prime, upwards by default or downwards
    when desc=True is given; if the starting value is itself prime, the search
    restarts just above it."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
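A quick sanity check of the two helpers above; the expected values follow directly from the definitions:

# Illustrative usage of is_prime / next_prime
assert is_prime(2) and is_prime(13) and not is_prime(21)
print(next_prime(14))             # 17, since 15 and 16 are composite
print(next_prime(13))             # 17, because 13 is itself prime so the search restarts at 14
print(next_prime(10, desc=True))  # 7, searching downwards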
386
1
"""simple docstring""" # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class __a : def __init__( self , a__ , a__ , a__ = True , a__ = False ): _lowerCamelCase = scheduler _lowerCamelCase = optimizers if isinstance(a__ , (list, tuple) ) else [optimizers] _lowerCamelCase = split_batches _lowerCamelCase = step_with_optimizer _lowerCamelCase = GradientState() def snake_case_ ( self , *a__ , **a__ ): if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*a__ , **a__ ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*a__ , **a__ ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step _lowerCamelCase = AcceleratorState().num_processes for _ in range(a__ ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*a__ , **a__ ) else: self.scheduler.step(*a__ , **a__ ) def snake_case_ ( self ): return self.scheduler.get_last_lr() def snake_case_ ( self ): return self.scheduler.state_dict() def snake_case_ ( self , a__ ): self.scheduler.load_state_dict(a__ ) def snake_case_ ( self ): return self.scheduler.get_lr() def snake_case_ ( self , *a__ , **a__ ): return self.scheduler.print_lr(*a__ , **a__ )
222
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE__ : Optional[int] = IFInpaintingPipeline SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : List[str] = PipelineTesterMixin.required_optional_params - {"latents"} def snake_case_ ( self ): return self._get_dummy_components() def snake_case_ ( self , a__ , a__=0 ): if str(a__ ).startswith('mps' ): _lowerCamelCase = torch.manual_seed(a__ ) else: _lowerCamelCase = torch.Generator(device=a__ ).manual_seed(a__ ) _lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ ) _lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ ) _lowerCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def snake_case_ ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def snake_case_ ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def snake_case_ ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case_ ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case_ ( self ): self._test_save_load_local() def snake_case_ ( self ): self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
222
1
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
17
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper for the open knight's tour problem."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight's tour on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
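An illustrative run of the solver above; the backtracking search is exponential, so boards much larger than 5 or 6 can take a long time:

# Print one open knight's tour on a 5x5 board; cells hold the visit order 1..25.
for row in open_knight_tour(5):
    print(row)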
666
0
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
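A short usage sketch for this pipeline; `CompVis/ldm-celebahq-256` is a public unconditional latent-diffusion checkpoint and is used here purely for illustration:

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = pipe(num_inference_steps=50).images[0]
image.save("ldm_generated.png")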
568
'''simple docstring''' import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def UpperCAmelCase_ ( ): """simple docstring""" lowerCAmelCase__ : int = torch.nn.Linear(2 , 4 ) lowerCAmelCase__ : List[str] = torch.optim.AdamW(model.parameters() , lr=1.0 ) lowerCAmelCase__ : List[Any] = torch.optim.lr_scheduler.OneCycleLR(lowerCamelCase_ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) lowerCAmelCase__ : List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) lowerCAmelCase__ : int = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Tuple = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(lowerCamelCase_ ) class lowerCAmelCase ( UpperCamelCase_ ): @require_cuda def _A ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a__ ): lowerCAmelCase__ : int = Accelerator(cpu=a__ ) def _A ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = Accelerator() lowerCAmelCase__ : Union[str, Any] = GradientState() assert state.num_steps == 1 lowerCAmelCase__ : Any = 4 assert state.num_steps == 4 assert state.sync_gradients is True lowerCAmelCase__ : Tuple = False assert state.sync_gradients is False GradientState._reset_state() def _A ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Tuple = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = create_components() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Dict = accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def _A ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = create_components() accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def _A ( self : Dict ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a__ : Optional[int] , **a__ : List[Any] ): pass with 
patch("torch.cuda.set_device" , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): lowerCAmelCase__ : Any = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def _A ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = create_components() accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) lowerCAmelCase__ : int = get_signature(a__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a__ ) # make sure random weights don't match load_random_weights(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 ) def _A ( self : str ): '''simple docstring''' lowerCAmelCase__ : List[Any] = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = create_components() accelerator.prepare(a__ , a__ , a__ , a__ , a__ ) lowerCAmelCase__ : List[str] = get_signature(a__ ) # saving hook def save_config(a__ : List[Any] , a__ : Tuple , a__ : List[Any] ): lowerCAmelCase__ : Any = {"class_name": models[0].__class__.__name__} with open(os.path.join(a__ , "data.json" ) , "w" ) as f: json.dump(a__ , a__ ) # loading hook def load_config(a__ : Any , a__ : Tuple ): with open(os.path.join(a__ , "data.json" ) , "r" ) as f: lowerCAmelCase__ : str = json.load(a__ ) lowerCAmelCase__ : Tuple = config["class_name"] lowerCAmelCase__ : List[Any] = accelerator.register_save_state_pre_hook(a__ ) lowerCAmelCase__ : Optional[Any] = accelerator.register_load_state_pre_hook(a__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a__ ) # make sure random weights don't match with hooks load_random_weights(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCAmelCase__ : Tuple = "random" # make sure loaded weights match with hooks accelerator.load_state(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a__ ) # make sure random weights don't match with hooks removed load_random_weights(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCAmelCase__ : Union[str, Any] = "random" # make sure loaded weights match with hooks removed accelerator.load_state(a__ ) self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def _A ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : List[str] = Accelerator() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = create_components() lowerCAmelCase__ : str = None # This should work lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = accelerator.prepare( a__ , a__ , a__ , a__ , a__ , a__ ) self.assertTrue(dummy_obj is None ) def _A ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : int = Accelerator() 
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = create_components() lowerCAmelCase__ : Dict = [1, 2, 3] # This should work lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = accelerator.prepare( a__ , a__ , a__ , a__ , a__ , a__ ) self.assertEqual( getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(a__ , "_is_accelerate_prepared" , a__ ) , a__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def _A ( self : List[str] ): '''simple docstring''' from transformers import AutoModelForCausalLM lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=a__ , device_map={"": 0} , ) lowerCAmelCase__ : Optional[Any] = Accelerator() # This should work lowerCAmelCase__ : List[Any] = accelerator.prepare(a__ ) @slow @require_bnb def _A ( self : Optional[int] ): '''simple docstring''' from transformers import AutoModelForCausalLM lowerCAmelCase__ : Optional[Any] = Accelerator() with init_empty_weights(): lowerCAmelCase__ : Any = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCAmelCase__ : int = infer_auto_device_map(a__ ) lowerCAmelCase__ : str = "cpu" lowerCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ ) # This should not work and get value error with self.assertRaises(a__ ): lowerCAmelCase__ : Tuple = accelerator.prepare(a__ ) @slow @require_bnb @require_multi_gpu def _A ( self : Optional[int] ): '''simple docstring''' from transformers import AutoModelForCausalLM lowerCAmelCase__ : Optional[Any] = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): lowerCAmelCase__ : str = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCAmelCase__ : List[Any] = infer_auto_device_map(a__ ) lowerCAmelCase__ : Dict = 1 lowerCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=a__ , device_map=a__ , ) lowerCAmelCase__ : Dict = Accelerator() # This should not work and get value error with self.assertRaises(a__ ): lowerCAmelCase__ : List[Any] = accelerator.prepare(a__ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def _A ( self : Dict ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): lowerCAmelCase__ : int = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) lowerCAmelCase__ : Union[str, Any] = infer_auto_device_map(a__ ) lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Optional[int] = AutoModelForCausalLM.from_pretrained( 
"EleutherAI/gpt-neo-125m" , load_in_abit=a__ , device_map=a__ , ) lowerCAmelCase__ : int = Accelerator() # This should work lowerCAmelCase__ : int = accelerator.prepare(a__ ) @require_cuda def _A ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Tuple = torch.nn.Linear(10 , 10 ) lowerCAmelCase__ : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01 ) lowerCAmelCase__ : List[Any] = Accelerator(cpu=a__ ) lowerCAmelCase__ : int = accelerator.prepare(a__ )
568
1
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
581
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str ): def get_masked_lm_array(_lowerCamelCase : str ): __a : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' __a : Dict = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) if "kernel" in name: __a : List[str] = array.transpose() return torch.from_numpy(_lowerCamelCase ) def get_encoder_array(_lowerCamelCase : str ): __a : Optional[Any] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' __a : Any = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) if "kernel" in name: __a : List[Any] = array.transpose() return torch.from_numpy(_lowerCamelCase ) def get_encoder_layer_array(_lowerCamelCase : int , _lowerCamelCase : str ): __a : Dict = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' __a : List[str] = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) if "kernel" in name: __a : List[str] = array.transpose() return torch.from_numpy(_lowerCamelCase ) def get_encoder_attention_layer_array(_lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Any ): __a : Optional[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' __a : Tuple = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase ) __a : Any = array.reshape(_lowerCamelCase ) if "kernel" in name: __a : Tuple = array.transpose() return torch.from_numpy(_lowerCamelCase ) print(F'''Loading model based on config from {config_path}...''' ) __a : int = BertConfig.from_json_file(_lowerCamelCase ) __a : str = BertForMaskedLM(_lowerCamelCase ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __a : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention __a : BertSelfAttention = layer.attention.self __a : List[str] = get_encoder_attention_layer_array( _lowerCamelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape ) __a : Dict = get_encoder_attention_layer_array( _lowerCamelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape ) __a : Optional[Any] = get_encoder_attention_layer_array( _lowerCamelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape ) __a : Optional[Any] = get_encoder_attention_layer_array( _lowerCamelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape ) __a : List[str] = get_encoder_attention_layer_array( _lowerCamelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape ) __a : List[str] = get_encoder_attention_layer_array( _lowerCamelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape ) # Self-attention Output __a : BertSelfOutput = layer.attention.output __a : List[Any] = get_encoder_attention_layer_array( _lowerCamelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape ) __a : Tuple = get_encoder_attention_layer_array( _lowerCamelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape ) __a : Optional[Any] = get_encoder_layer_array(_lowerCamelCase , """_attention_layer_norm/gamma""" ) __a : List[str] = get_encoder_layer_array(_lowerCamelCase , """_attention_layer_norm/beta""" ) # Intermediate __a : BertIntermediate = layer.intermediate __a : str = 
get_encoder_layer_array(_lowerCamelCase , """_intermediate_dense/kernel""" ) __a : str = get_encoder_layer_array(_lowerCamelCase , """_intermediate_dense/bias""" ) # Output __a : BertOutput = layer.output __a : Union[str, Any] = get_encoder_layer_array(_lowerCamelCase , """_output_dense/kernel""" ) __a : List[str] = get_encoder_layer_array(_lowerCamelCase , """_output_dense/bias""" ) __a : Optional[int] = get_encoder_layer_array(_lowerCamelCase , """_output_layer_norm/gamma""" ) __a : Tuple = get_encoder_layer_array(_lowerCamelCase , """_output_layer_norm/beta""" ) # Embeddings __a : str = get_encoder_array("""_position_embedding_layer/embeddings""" ) __a : Any = get_encoder_array("""_type_embedding_layer/embeddings""" ) __a : Optional[Any] = get_encoder_array("""_embedding_norm_layer/gamma""" ) __a : Dict = get_encoder_array("""_embedding_norm_layer/beta""" ) # LM Head __a : Any = model.cls.predictions.transform __a : List[Any] = get_masked_lm_array("""dense/kernel""" ) __a : Optional[Any] = get_masked_lm_array("""dense/bias""" ) __a : Optional[int] = get_masked_lm_array("""layer_norm/gamma""" ) __a : Tuple = get_masked_lm_array("""layer_norm/beta""" ) __a : Optional[Any] = get_masked_lm_array("""embedding_table""" ) # Pooling __a : Tuple = BertPooler(config=_lowerCamelCase ) __a : BertPooler = get_encoder_array("""_pooler_layer/kernel""" ) __a : BertPooler = get_encoder_array("""_pooler_layer/bias""" ) # Export final model model.save_pretrained(_lowerCamelCase ) # Integration test - should load without any errors ;) __a : List[str] = BertForMaskedLM.from_pretrained(_lowerCamelCase ) print(new_model.eval() ) print("""Model conversion was done sucessfully!""" ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) lowercase__ = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
581
1
"""simple docstring""" from __future__ import annotations class __SCREAMING_SNAKE_CASE : def __init__( self :Optional[Any] ,__UpperCAmelCase :int ) -> None: """simple docstring""" lowerCamelCase__ : str = order # a_{0} ... a_{k} lowerCamelCase__ : Optional[Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} lowerCamelCase__ : Optional[Any] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] lowerCamelCase__ : Any = [0.0] * self.order # y[n-1] ... y[n-k] lowerCamelCase__ : List[Any] = [0.0] * self.order def lowercase_ ( self :Union[str, Any] ,__UpperCAmelCase :list[float] ,__UpperCAmelCase :list[float] ) -> None: """simple docstring""" if len(__UpperCAmelCase ) < self.order: lowerCamelCase__ : Tuple = [1.0, *a_coeffs] if len(__UpperCAmelCase ) != self.order + 1: lowerCamelCase__ : Dict = ( F"""Expected a_coeffs to have {self.order + 1} elements """ F"""for {self.order}-order filter, got {len(__UpperCAmelCase )}""" ) raise ValueError(__UpperCAmelCase ) if len(__UpperCAmelCase ) != self.order + 1: lowerCamelCase__ : int = ( F"""Expected b_coeffs to have {self.order + 1} elements """ F"""for {self.order}-order filter, got {len(__UpperCAmelCase )}""" ) raise ValueError(__UpperCAmelCase ) lowerCamelCase__ : str = a_coeffs lowerCamelCase__ : Dict = b_coeffs def lowercase_ ( self :Any ,__UpperCAmelCase :float ) -> float: """simple docstring""" lowerCamelCase__ : List[Any] = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 ,self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) lowerCamelCase__ : Optional[Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] lowerCamelCase__ : Dict = self.input_history[:-1] lowerCamelCase__ : List[Any] = self.output_history[:-1] lowerCamelCase__ : List[str] = sample lowerCamelCase__ : List[Any] = result return result
121
"""simple docstring""" import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): UpperCAmelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING UpperCAmelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def lowercase_ ( self :Union[str, Any] ,__UpperCAmelCase :Optional[int] ,__UpperCAmelCase :Tuple ,__UpperCAmelCase :Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase__ : List[str] = AudioClassificationPipeline(model=__UpperCAmelCase ,feature_extractor=__UpperCAmelCase ) # test with a raw waveform lowerCamelCase__ : Optional[int] = np.zeros((3_40_00,) ) lowerCamelCase__ : Optional[Any] = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def lowercase_ ( self :Any ,__UpperCAmelCase :Optional[int] ,__UpperCAmelCase :Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Tuple = examples lowerCamelCase__ : Tuple = audio_classifier(__UpperCAmelCase ) # by default a model is initialized with num_labels=2 self.assertEqual( __UpperCAmelCase ,[ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] ,) lowerCamelCase__ : Dict = audio_classifier(__UpperCAmelCase ,top_k=1 ) self.assertEqual( __UpperCAmelCase ,[ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] ,) self.run_torchaudio(__UpperCAmelCase ) @require_torchaudio def lowercase_ ( self :List[Any] ,__UpperCAmelCase :List[Any] ) -> Union[str, Any]: """simple docstring""" import datasets # test with a local file lowerCamelCase__ : List[Any] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' ,'''clean''' ,split='''validation''' ) lowerCamelCase__ : Union[str, Any] = dataset[0]['''audio''']['''array'''] lowerCamelCase__ : Any = audio_classifier(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase ,[ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] ,) @require_torch def lowercase_ ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase__ : Any = '''anton-l/wav2vec2-random-tiny-classifier''' lowerCamelCase__ : List[Any] = pipeline('''audio-classification''' ,model=__UpperCAmelCase ) lowerCamelCase__ : Dict = np.ones((80_00,) ) lowerCamelCase__ : List[Any] = audio_classifier(__UpperCAmelCase ,top_k=4 ) lowerCamelCase__ : int = [ {'''score''': 0.0_842, '''label''': '''no'''}, {'''score''': 0.0_838, '''label''': '''up'''}, {'''score''': 0.0_837, '''label''': '''go'''}, {'''score''': 0.0_834, '''label''': '''right'''}, ] lowerCamelCase__ : str = [ {'''score''': 0.0_845, '''label''': '''stop'''}, {'''score''': 0.0_844, '''label''': '''on'''}, {'''score''': 0.0_841, '''label''': '''right'''}, {'''score''': 0.0_834, '''label''': '''left'''}, ] self.assertIn(nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) lowerCamelCase__ : Optional[Any] = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate} lowerCamelCase__ : List[Any] = 
audio_classifier(__UpperCAmelCase ,top_k=4 ) self.assertIn(nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def lowercase_ ( self :Optional[int] ) -> int: """simple docstring""" import datasets lowerCamelCase__ : Optional[int] = '''superb/wav2vec2-base-superb-ks''' lowerCamelCase__ : Optional[int] = pipeline('''audio-classification''' ,model=__UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' ,'''ks''' ,split='''test''' ) lowerCamelCase__ : Dict = np.array(dataset[3]['''speech'''] ,dtype=np.floataa ) lowerCamelCase__ : List[Any] = audio_classifier(__UpperCAmelCase ,top_k=4 ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=3 ) ,[ {'''score''': 0.981, '''label''': '''go'''}, {'''score''': 0.007, '''label''': '''up'''}, {'''score''': 0.006, '''label''': '''_unknown_'''}, {'''score''': 0.001, '''label''': '''down'''}, ] ,) @require_tf @unittest.skip('''Audio classification is not implemented for TF''' ) def lowercase_ ( self :Union[str, Any] ) -> List[Any]: """simple docstring""" pass
121
1
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer _snake_case : Union[str, Any] = logging.getLogger(__name__) def lowerCAmelCase_ ( ): __snake_case : int = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=__lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=__lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=__lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=__lowerCamelCase , default=1_0_0_0 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=__lowerCamelCase , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=__lowerCamelCase , type=__lowerCamelCase , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=__lowerCamelCase , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=__lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." , ) __snake_case : List[str] = parser.parse_args() return args def lowerCAmelCase_ ( __lowerCamelCase ): def fn(__lowerCamelCase ): return tokenizer(examples["text"] ) return fn def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Tuple = [] for i in range(len(tokenized_data["input_ids"] ) ): __snake_case : Tuple = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } __snake_case : List[Any] = tf.train.Features(feature=__lowerCamelCase ) __snake_case : str = tf.train.Example(features=__lowerCamelCase ) __snake_case : List[str] = example.SerializeToString() records.append(__lowerCamelCase ) return records def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Optional[int] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: __snake_case : Optional[Any] = min(len(__lowerCamelCase ) , args.limit ) __snake_case : Dict = dataset.select(range(__lowerCamelCase ) ) print(F'Limiting the dataset to {args.limit} entries.' ) __snake_case : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) __snake_case : Dict = os.path.join(args.output_dir , args.split ) if not os.path.exists(__lowerCamelCase ): os.makedirs(__lowerCamelCase ) else: __snake_case : str = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
__snake_case : Any = tokenize_function(__lowerCamelCase ) __snake_case : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(__lowerCamelCase ): # Concatenate all texts. __snake_case : List[str] = {k: sum(examples[k] , [] ) for k in examples.keys()} __snake_case : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 __snake_case : Any = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. __snake_case : int = { k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )] for k, t in concatenated_examples.items() } return result __snake_case : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=1_0_0_0 , num_proc=4 ) __snake_case : Optional[Any] = 0 __snake_case : Optional[Any] = 0 for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ): __snake_case : List[str] = grouped_dataset[shard : shard + args.shard_size] __snake_case : Any = len(dataset_snapshot["input_ids"] ) __snake_case : List[Any] = os.path.join(__lowerCamelCase , F'dataset-{shard_count}-{records_containing}.tfrecord' ) __snake_case : Optional[Any] = get_serialized_examples(__lowerCamelCase ) with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file: for i in range(len(__lowerCamelCase ) ): __snake_case : Union[str, Any] = serialized_examples[i] out_file.write(__lowerCamelCase ) print("Wrote file {} containing {} records".format(__lowerCamelCase , __lowerCamelCase ) ) shard_count += 1 total_records += records_containing with open(F'split-{args.split}-records-count.txt' , "w" ) as f: print(F'Total {args.split} records: {total_records}' , file=__lowerCamelCase ) if __name__ == "__main__": _snake_case : List[Any] = parse_args() main(args)
81
"""simple docstring""" def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> bool: if not all(x.isalpha() for x in string ): raise ValueError('String must only contain alphabetic characters.' ) SCREAMING_SNAKE_CASE = sorted(string.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == len(set(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": __UpperCamelCase = input('''Enter a string ''').strip() __UpperCamelCase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
247
0
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of data around pivot: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items in expected O(n) time."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
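For example, selecting order statistics from an unsorted list in expected linear time:

data = [7, 2, 9, 4, 1, 8, 3]
print(quick_select(data, len(data) // 2))  # 4, the median
print(quick_select(data, 0))               # 1, the minimum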
710
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowercase = logging.get_logger(__name__) _lowercase = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowercase : List[Any] = '''table-transformer''' _lowercase : List[str] = ['''past_key_values'''] _lowercase : Any = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _lowercase=True , _lowercase=None , _lowercase=3 , _lowercase=100 , _lowercase=6 , _lowercase=2_048 , _lowercase=8 , _lowercase=6 , _lowercase=2_048 , _lowercase=8 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=True , _lowercase="relu" , _lowercase=256 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , _lowercase=False , _lowercase="sine" , _lowercase="resnet50" , _lowercase=True , _lowercase=False , _lowercase=1 , _lowercase=5 , _lowercase=2 , _lowercase=1 , _lowercase=1 , _lowercase=5 , _lowercase=2 , _lowercase=0.1 , **_lowercase , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _lowerCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(_lowercase , _lowercase ): _lowerCAmelCase = backbone_config.get("""model_type""" ) _lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase = config_class.from_dict(_lowercase ) # set timm attributes to None _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None, None, None _lowerCAmelCase = use_timm_backbone _lowerCAmelCase = backbone_config _lowerCAmelCase = num_channels _lowerCAmelCase = num_queries _lowerCAmelCase = d_model _lowerCAmelCase = encoder_ffn_dim _lowerCAmelCase = encoder_layers _lowerCAmelCase = encoder_attention_heads _lowerCAmelCase = decoder_ffn_dim _lowerCAmelCase = decoder_layers _lowerCAmelCase = decoder_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = activation_function _lowerCAmelCase = init_std _lowerCAmelCase = init_xavier_std _lowerCAmelCase = encoder_layerdrop _lowerCAmelCase = decoder_layerdrop _lowerCAmelCase = encoder_layers _lowerCAmelCase = auxiliary_loss _lowerCAmelCase = position_embedding_type _lowerCAmelCase = backbone _lowerCAmelCase = use_pretrained_backbone _lowerCAmelCase = dilation # Hungarian matcher _lowerCAmelCase = class_cost _lowerCAmelCase = bbox_cost _lowerCAmelCase = giou_cost # Loss coefficients _lowerCAmelCase = mask_loss_coefficient _lowerCAmelCase = dice_loss_coefficient _lowerCAmelCase = bbox_loss_coefficient _lowerCAmelCase = giou_loss_coefficient _lowerCAmelCase = eos_coefficient super().__init__(is_encoder_decoder=_lowercase , **_lowercase ) @property def _lowercase ( self ): """simple docstring""" return self.encoder_attention_heads @property def _lowercase ( self ): """simple docstring""" return self.d_model class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowercase 
: Optional[Any] = version.parse('''1.11''' ) @property def _lowercase ( self ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def _lowercase ( self ): """simple docstring""" return 1e-5 @property def _lowercase ( self ): """simple docstring""" return 12
162
0
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the highway following the parameters requested."""
    highway = [[-1] * number_of_cells]  # create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between the car at car_index and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # if the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Apply one Nagel-Schreckenberg speed update to every car."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the update/move loop and record every intermediate highway state."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
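An illustrative run of the model above: a 20-cell circular road seeded with a car every 6 cells, stepped four times (output varies with the random slowdowns):

highway = construct_highway(20, frequency=6, initial_speed=1)
for state in simulate(highway, number_of_update=4, probability=0.1, max_speed=5):
    print(state)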
28
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowercase : Dict = logging.get_logger(__name__) lowercase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} # See all BART models at https://huggingface.co/models?filter=bart lowercase : Optional[Any] = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, """tokenizer_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""", }, } lowercase : Tuple = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } class __snake_case ( lowerCAmelCase ): _a : Tuple= VOCAB_FILES_NAMES _a : Any= PRETRAINED_VOCAB_FILES_MAP _a : Union[str, Any]= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : List[str]= ["input_ids", "attention_mask"] _a : List[Any]= BartTokenizer def __init__( self ,snake_case=None ,snake_case=None ,snake_case=None ,snake_case="replace" ,snake_case="<s>" ,snake_case="</s>" ,snake_case="</s>" ,snake_case="<s>" ,snake_case="<unk>" ,snake_case="<pad>" ,snake_case="<mask>" ,snake_case=False ,snake_case=True ,**snake_case ,): '''simple docstring''' super().__init__( snake_case ,snake_case ,tokenizer_file=snake_case ,errors=snake_case ,bos_token=snake_case ,eos_token=snake_case ,sep_token=snake_case ,cls_token=snake_case 
,unk_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,add_prefix_space=snake_case ,trim_offsets=snake_case ,**snake_case ,) lowercase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space: lowercase : Union[str, Any] = getattr(snake_case ,pre_tok_state.pop("""type""" ) ) lowercase : Dict = add_prefix_space lowercase : List[str] = pre_tok_class(**snake_case ) lowercase : List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase : int = """post_processor""" lowercase : List[Any] = getattr(self.backend_tokenizer ,snake_case ,snake_case ) if tokenizer_component_instance: lowercase : List[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Dict = tuple(state["""sep"""] ) if "cls" in state: lowercase : Dict = tuple(state["""cls"""] ) lowercase : Union[str, Any] = False if state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space: lowercase : List[Any] = add_prefix_space lowercase : List[str] = True if state.get("""trim_offsets""" ,snake_case ) != trim_offsets: lowercase : Optional[int] = trim_offsets lowercase : Dict = True if changes_to_apply: lowercase : int = getattr(snake_case ,state.pop("""type""" ) ) lowercase : Any = component_class(**snake_case ) setattr(self.backend_tokenizer ,snake_case ,snake_case ) @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : str = AddedToken(snake_case ,lstrip=snake_case ,rstrip=snake_case ) if isinstance(snake_case ,snake_case ) else value lowercase : Optional[Any] = value def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ): '''simple docstring''' lowercase : Union[str, Any] = kwargs.get("""is_split_into_words""" ,snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*snake_case ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ): '''simple docstring''' lowercase : Union[str, Any] = kwargs.get("""is_split_into_words""" ,snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._encode_plus(*snake_case ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ): '''simple docstring''' lowercase : Union[str, Any] = self._tokenizer.model.save(snake_case ,name=snake_case ) return tuple(snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ): '''simple docstring''' lowercase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ): '''simple docstring''' lowercase : Union[str, Any] = [self.sep_token_id] lowercase : List[Any] = [self.cls_token_id] if token_ids_a is None: return 
len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
336
0
"""simple docstring""" from __future__ import annotations from typing import Any class A__ ( _lowerCamelCase): pass class A__ : def __init__( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = data __lowerCAmelCase : Node | None = None def __iter__( self ): __lowerCAmelCase : List[str] = self __lowerCAmelCase : int = [] while node: if node in visited: raise ContainsLoopError visited.append(_SCREAMING_SNAKE_CASE ) yield node.data __lowerCAmelCase : Union[str, Any] = node.next_node @property def __lowerCamelCase ( self ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": lowerCamelCase__ = Node(1) lowerCamelCase__ = Node(2) lowerCamelCase__ = Node(3) lowerCamelCase__ = Node(4) print(root_node.has_loop) # False lowerCamelCase__ = root_node.next_node print(root_node.has_loop) # True lowerCamelCase__ = Node(5) lowerCamelCase__ = Node(6) lowerCamelCase__ = Node(5) lowerCamelCase__ = Node(6) print(root_node.has_loop) # False lowerCamelCase__ = Node(1) print(root_node.has_loop) # False
709
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class A__ : def __init__( self ): __lowerCAmelCase : Any = {} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = {} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if nodea not in self.connections: self.add_node(_SCREAMING_SNAKE_CASE ) if nodea not in self.connections: self.add_node(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = probability def __lowerCamelCase ( self ): return list(self.connections ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : List[Any] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Optional[Any] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : str = Counter(graph.get_nodes() ) __lowerCAmelCase : Tuple = start for _ in range(_UpperCamelCase ): __lowerCAmelCase : int = graph.transition(_UpperCamelCase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
549
0
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
40
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
1
"""Generate all k-combinations of the numbers 1..n via backtracking."""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
721
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
419
0
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __magic_name__ : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class __snake_case (lowerCamelCase , unittest.TestCase ): __a = BartphoTokenizer __a = False __a = True def __a ( self: Tuple ): super().setUp() __lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] __lowerCamelCase = dict(zip(A_ , range(len(A_ ) ) ) ) __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n' ) __lowerCamelCase = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self: List[Any] , **A_: List[str] ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **A_ ) def __a ( self: int , A_: Union[str, Any] ): __lowerCamelCase = """This is a là test""" __lowerCamelCase = """This is a<unk><unk> test""" return input_text, output_text def __a ( self: Optional[Any] ): __lowerCamelCase = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map ) __lowerCamelCase = """This is a là test""" __lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split() __lowerCamelCase = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
281
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a_ ( lowercase__ :Optional[Any], lowercase__ :List[str]=0.999, lowercase__ :Optional[int]="cosine", ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase__ :str ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase__ :Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' ) __lowerCamelCase = [] for i in range(lowercase__ ): __lowerCamelCase = i / num_diffusion_timesteps __lowerCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ), lowercase__ ) ) return torch.tensor(lowercase__, dtype=torch.floataa ) class __snake_case (lowerCamelCase , lowerCamelCase ): __a = [e.name for e in KarrasDiffusionSchedulers] __a = 2 @register_to_config def __init__( self: Any , A_: int = 10_00 , A_: float = 0.00_085 , A_: float = 0.012 , A_: str = "linear" , A_: Optional[Union[np.ndarray, List[float]]] = None , A_: str = "epsilon" , A_: Optional[bool] = False , A_: Optional[bool] = False , A_: float = 1.0 , A_: str = "linspace" , A_: int = 0 , ): if trained_betas is not None: __lowerCamelCase = torch.tensor(A_ , dtype=torch.floataa ) elif beta_schedule == "linear": __lowerCamelCase = torch.linspace(A_ , A_ , A_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowerCamelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , A_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowerCamelCase = betas_for_alpha_bar(A_ , alpha_transform_type="""cosine""" ) elif beta_schedule == "exp": __lowerCamelCase = betas_for_alpha_bar(A_ , alpha_transform_type="""exp""" ) else: raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' ) __lowerCamelCase = 1.0 - self.betas __lowerCamelCase = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(A_ , A_ , A_ ) __lowerCamelCase = use_karras_sigmas def __a ( self: Optional[Any] , A_: List[Any] , A_: Tuple=None ): if schedule_timesteps is None: __lowerCamelCase = self.timesteps __lowerCamelCase = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __lowerCamelCase = 1 if len(A_ ) > 1 else 0 else: __lowerCamelCase = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep __lowerCamelCase = self._index_counter[timestep_int] return indices[pos].item() @property def __a ( self: Tuple ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __a ( self: Union[str, Any] , A_: torch.FloatTensor , A_: Union[float, torch.FloatTensor] , ): __lowerCamelCase = self.index_for_timestep(A_ ) __lowerCamelCase = self.sigmas[step_index] __lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5) return sample def __a ( self: str , A_: int , A_: Union[str, torch.device] = None , A_: Optional[int] = None , ): __lowerCamelCase = num_inference_steps __lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , A_ , dtype=A_ )[::-1].copy() elif self.config.timestep_spacing == "leading": __lowerCamelCase = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowerCamelCase = (np.arange(0 , A_ ) * step_ratio).round()[::-1].copy().astype(A_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __lowerCamelCase = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowerCamelCase = (np.arange(A_ , 0 , -step_ratio )).round().copy().astype(A_ ) timesteps -= 1 else: raise ValueError( f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) __lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __lowerCamelCase = np.log(A_ ) __lowerCamelCase = np.interp(A_ , np.arange(0 , len(A_ ) ) , A_ ) if self.config.use_karras_sigmas: __lowerCamelCase = self._convert_to_karras(in_sigmas=A_ , num_inference_steps=self.num_inference_steps ) __lowerCamelCase = np.array([self._sigma_to_t(A_ , A_ ) for sigma in sigmas] ) __lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __lowerCamelCase = torch.from_numpy(A_ ).to(device=A_ ) __lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __lowerCamelCase = torch.from_numpy(A_ ) __lowerCamelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A_ ).startswith("""mps""" ): # mps does not support float64 __lowerCamelCase = timesteps.to(A_ , dtype=torch.floataa ) else: __lowerCamelCase = timesteps.to(device=A_ ) # empty dt and derivative __lowerCamelCase = None __lowerCamelCase = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __lowerCamelCase = defaultdict(A_ ) def __a ( self: Any , A_: int , A_: int ): # get log sigma __lowerCamelCase = np.log(A_ ) # get distribution __lowerCamelCase = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __lowerCamelCase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __lowerCamelCase = low_idx + 1 __lowerCamelCase = log_sigmas[low_idx] __lowerCamelCase = log_sigmas[high_idx] # interpolate sigmas __lowerCamelCase = (low - log_sigma) / (low - high) __lowerCamelCase = np.clip(A_ , 0 , 1 ) # transform interpolation to time range __lowerCamelCase = (1 - w) * low_idx + w * high_idx __lowerCamelCase = t.reshape(sigma.shape ) return t def __a ( self: Dict , A_: torch.FloatTensor , A_: Tuple ): __lowerCamelCase = in_sigmas[-1].item() __lowerCamelCase = in_sigmas[0].item() __lowerCamelCase = 7.0 # 7.0 is the value used in the paper __lowerCamelCase = np.linspace(0 , 1 , A_ ) __lowerCamelCase = sigma_min ** (1 / rho) __lowerCamelCase = sigma_max ** (1 / rho) __lowerCamelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def __a ( self: List[Any] ): return self.dt is None def __a ( self: Union[str, Any] , A_: Union[torch.FloatTensor, np.ndarray] , A_: Union[float, torch.FloatTensor] , A_: Union[torch.FloatTensor, np.ndarray] , A_: bool = True , ): __lowerCamelCase = self.index_for_timestep(A_ ) # advance index counter by 1 __lowerCamelCase = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __lowerCamelCase = self.sigmas[step_index] __lowerCamelCase = self.sigmas[step_index + 1] else: # 2nd order / Heun's method __lowerCamelCase = self.sigmas[step_index - 1] __lowerCamelCase = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __lowerCamelCase = 0 __lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next __lowerCamelCase = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next __lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __lowerCamelCase = model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: __lowerCamelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __lowerCamelCase = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __lowerCamelCase = sigma_next - sigma_hat # store for 2nd order step __lowerCamelCase = derivative __lowerCamelCase = dt __lowerCamelCase = sample else: # 2. 2nd order / Heun's method __lowerCamelCase = (sample - pred_original_sample) / sigma_next __lowerCamelCase = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __lowerCamelCase = self.dt __lowerCamelCase = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A_ ) def __a ( self: str , A_: torch.FloatTensor , A_: torch.FloatTensor , A_: torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A_ ): # mps does not support float64 __lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __lowerCamelCase = self.timesteps.to(original_samples.device ) __lowerCamelCase = timesteps.to(original_samples.device ) __lowerCamelCase = [self.index_for_timestep(A_ , A_ ) for t in timesteps] __lowerCamelCase = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __lowerCamelCase = sigma.unsqueeze(-1 ) __lowerCamelCase = original_samples + noise * sigma return noisy_samples def __len__( self: Tuple ): return self.config.num_train_timesteps
281
1
"""Convert between energy units, using the joule as the base unit."""
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
703
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    # Convert a torch image tensor in [-1, 1] to a list of PIL images.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    # Convert a numpy image batch in [0, 1] to a list of PIL images.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
220
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , ): '''simple docstring''' __lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_pad def lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if not batched: __lowerCamelCase = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): __lowerCamelCase = image.size else: __lowerCamelCase = image.shape[1], image.shape[2] if w < h: __lowerCamelCase = int(self.size['''shortest_edge'''] * h / w ) __lowerCamelCase = self.size['''shortest_edge'''] elif w > h: __lowerCamelCase = self.size['''shortest_edge'''] __lowerCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: __lowerCamelCase = self.size['''shortest_edge'''] __lowerCamelCase = self.size['''shortest_edge'''] else: __lowerCamelCase = [] for image in image_inputs: __lowerCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCamelCase = max(__lowerCamelCase , key=lambda __UpperCAmelCase : item[0] )[0] __lowerCamelCase = max(__lowerCamelCase , key=lambda __UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = DetaImageProcessingTester(self ) @property def lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''do_rescale''' ) ) self.assertTrue(hasattr(__lowerCamelCase , 
'''do_pad''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) __lowerCamelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values __lowerCamelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCamelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCamelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values __lowerCamelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = {'''image_id''': 39769, '''annotations''': target} # encode them __lowerCamelCase = DetaImageProcessor() __lowerCamelCase = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors='''pt''' ) # verify pixel values __lowerCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase ) __lowerCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) ) # verify area __lowerCamelCase = torch.tensor([5_887.9_600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowerCamelCase ) ) # verify boxes __lowerCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase ) __lowerCamelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1E-3 ) ) # verify image_id __lowerCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) ) # verify is_crowd __lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) ) # verify class_labels __lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) ) # verify orig_size __lowerCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) ) # verify size __lowerCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __lowerCamelCase = json.loads(f.read() ) __lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} __lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __lowerCamelCase = DetaImageProcessor(format='''coco_panoptic''' ) __lowerCamelCase = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors='''pt''' ) # verify pixel values __lowerCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , __lowerCamelCase ) __lowerCamelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) ) # verify area __lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , 
__lowerCamelCase ) ) # verify boxes __lowerCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowerCamelCase ) __lowerCamelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowerCamelCase , atol=1E-3 ) ) # verify image_id __lowerCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowerCamelCase ) ) # verify is_crowd __lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowerCamelCase ) ) # verify class_labels __lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowerCamelCase ) ) # verify masks __lowerCamelCase = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowerCamelCase ) # verify orig_size __lowerCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowerCamelCase ) ) # verify size __lowerCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowerCamelCase ) )
175
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __snake_case : Optional[int] =get_logger(__name__) class lowerCamelCase__ : '''simple docstring''' def __init__(self ,__lowerCamelCase = None ) -> Any: """simple docstring""" lowerCAmelCase__ : List[Any] = ( os.path.join(__lowerCamelCase ,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) lowerCAmelCase__ : Tuple = Extractor def lowerCAmelCase__ (self ,__lowerCamelCase ) -> str: """simple docstring""" from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" lowerCAmelCase__ : List[Any] = os.path.abspath(__lowerCamelCase ) return os.path.join(self.extract_dir ,hash_url_to_filename(__lowerCamelCase ) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> bool: """simple docstring""" return force_extract or ( not os.path.isfile(__lowerCamelCase ) and not (os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase )) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = False ) -> str: """simple docstring""" lowerCAmelCase__ : List[str] = self.extractor.infer_extractor_format(__lowerCamelCase ) if not extractor_format: return input_path lowerCAmelCase__ : Optional[int] = self._get_output_path(__lowerCamelCase ) if self._do_extract(__lowerCamelCase ,__lowerCamelCase ): self.extractor.extract(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) return output_path class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' @classmethod @abstractmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ,**__lowerCamelCase ) -> bool: """simple docstring""" ... @staticmethod @abstractmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" ... 
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__): '''simple docstring''' snake_case_ =[] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" with open(__lowerCamelCase ,'''rb''' ) as f: return f.read(__lowerCamelCase ) @classmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ,__lowerCamelCase = b"" ) -> bool: """simple docstring""" if not magic_number: lowerCAmelCase__ : Optional[Any] = max(len(__lowerCamelCase ) for cls_magic_number in cls.magic_numbers ) try: lowerCAmelCase__ : Optional[Any] = cls.read_magic_number(__lowerCamelCase ,__lowerCamelCase ) except OSError: return False return any(magic_number.startswith(__lowerCamelCase ) for cls_magic_number in cls.magic_numbers ) class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' @classmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ,**__lowerCamelCase ) -> bool: """simple docstring""" return tarfile.is_tarfile(__lowerCamelCase ) @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> Any: """simple docstring""" def resolved(__lowerCamelCase ) -> str: return os.path.realpath(os.path.abspath(__lowerCamelCase ) ) def badpath(__lowerCamelCase ,__lowerCamelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(__lowerCamelCase ,__lowerCamelCase ) ).startswith(__lowerCamelCase ) def badlink(__lowerCamelCase ,__lowerCamelCase ) -> bool: # Links are interpreted relative to the directory containing the link lowerCAmelCase__ : Dict = resolved(os.path.join(__lowerCamelCase ,os.path.dirname(info.name ) ) ) return badpath(info.linkname ,base=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = resolved(__lowerCamelCase ) for finfo in members: if badpath(finfo.name ,__lowerCamelCase ): logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" ) elif finfo.issym() and badlink(__lowerCamelCase ,__lowerCamelCase ): logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" ) elif finfo.islnk() and badlink(__lowerCamelCase ,__lowerCamelCase ): logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" ) else: yield finfo @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase ) lowerCAmelCase__ : int = tarfile.open(__lowerCamelCase ) tar_file.extractall(__lowerCamelCase ,members=TarExtractor.safemembers(__lowerCamelCase ,__lowerCamelCase ) ) tar_file.close() class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""\x1F\x8B"""] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" with gzip.open(__lowerCamelCase ,'''rb''' ) as gzip_file: with open(__lowerCamelCase ,'''wb''' ) as extracted_file: shutil.copyfileobj(__lowerCamelCase ,__lowerCamelCase ) class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[ b"""PK\x03\x04""", b"""PK\x05\x06""", # empty archive b"""PK\x07\x08""", # spanned archive ] @classmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ,__lowerCamelCase = b"" ) -> bool: """simple docstring""" if super().is_extractable(__lowerCamelCase ,magic_number=__lowerCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(__lowerCamelCase ,'''rb''' ) as fp: lowerCAmelCase__ : Optional[int] = _EndRecData(__lowerCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: lowerCAmelCase__ : Optional[int] = fp.read(__lowerCamelCase ) # CD is where we expect it to be if len(__lowerCamelCase ) == sizeCentralDir: lowerCAmelCase__ : List[str] = struct.unpack(__lowerCamelCase ,__lowerCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase ) with zipfile.ZipFile(__lowerCamelCase ,'''r''' ) as zip_file: zip_file.extractall(__lowerCamelCase ) zip_file.close() class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" with lzma.open(__lowerCamelCase ) as compressed_file: with open(__lowerCamelCase ,'''wb''' ) as extracted_file: shutil.copyfileobj(__lowerCamelCase ,__lowerCamelCase ) class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''' ) import rarfile os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase ) lowerCAmelCase__ : Dict = rarfile.RarFile(__lowerCamelCase ) rf.extractall(__lowerCamelCase ) rf.close() class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""\x28\xb5\x2F\xFD"""] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install zstandard''' ) import zstandard as zstd lowerCAmelCase__ : Dict = zstd.ZstdDecompressor() with open(__lowerCamelCase ,'''rb''' ) as ifh, open(__lowerCamelCase ,'''wb''' ) as ofh: dctx.copy_stream(__lowerCamelCase ,__lowerCamelCase ) class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""\x42\x5A\x68"""] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" with bza.open(__lowerCamelCase ,'''rb''' ) as compressed_file: with open(__lowerCamelCase ,'''wb''' ) as extracted_file: shutil.copyfileobj(__lowerCamelCase ,__lowerCamelCase ) class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" if not config.PY7ZR_AVAILABLE: raise 
ImportError('''Please pip install py7zr''' ) import pyazr os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase ) with pyazr.SevenZipFile(__lowerCamelCase ,'''r''' ) as archive: archive.extractall(__lowerCamelCase ) class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =[b"""\x04\x22\x4D\x18"""] @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None: """simple docstring""" if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''' ) import lza.frame with lza.frame.open(__lowerCamelCase ,'''rb''' ) as compressed_file: with open(__lowerCamelCase ,'''wb''' ) as extracted_file: shutil.copyfileobj(__lowerCamelCase ,__lowerCamelCase ) class lowerCamelCase__ : '''simple docstring''' snake_case_ ={ "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def lowerCAmelCase__ (cls ) -> str: """simple docstring""" return max( len(__lowerCamelCase ) for extractor in cls.extractors.values() if issubclass(__lowerCamelCase ,__lowerCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> Tuple: """simple docstring""" try: return MagicNumberBaseExtractor.read_magic_number(__lowerCamelCase ,magic_number_length=__lowerCamelCase ) except OSError: return b"" @classmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ,__lowerCamelCase = False ) -> bool: """simple docstring""" warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'infer_extractor_format\' instead.''' ,category=__lowerCamelCase ,) lowerCAmelCase__ : int = cls.infer_extractor_format(__lowerCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ) -> str: # <Added version="2.4.0"/> """simple docstring""" lowerCAmelCase__ : Dict = cls._get_magic_number_max_length() lowerCAmelCase__ : Any = cls._read_magic_number(__lowerCamelCase ,__lowerCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(__lowerCamelCase ,magic_number=__lowerCamelCase ): return extractor_format @classmethod def lowerCAmelCase__ (cls ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,__lowerCamelCase = "deprecated" ,) -> None: """simple docstring""" os.makedirs(os.path.dirname(__lowerCamelCase ) ,exist_ok=__lowerCamelCase ) # Prevent parallel extractions lowerCAmelCase__ : Dict = str(Path(__lowerCamelCase ).with_suffix('''.lock''' ) ) with FileLock(__lowerCamelCase ): shutil.rmtree(__lowerCamelCase ,ignore_errors=__lowerCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(__lowerCamelCase ,__lowerCamelCase ): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
''' '''Use \'extractor_format\' instead.''' ,category=__lowerCamelCase ,) lowerCAmelCase__ : Dict = extractor if extractor != '''deprecated''' else extractor_format else: lowerCAmelCase__ : str = cls.extractors[extractor_format] return extractor.extract(__lowerCamelCase ,__lowerCamelCase ) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' ,category=__lowerCamelCase ,) for extractor in cls.extractors.values(): if extractor.is_extractable(__lowerCamelCase ): return extractor.extract(__lowerCamelCase ,__lowerCamelCase )
647
0
"""simple docstring""" # flake8: noqa # Lint as: python3 __UpperCAmelCase = [ 'VerificationMode', 'Version', 'disable_progress_bar', 'enable_progress_bar', 'is_progress_bar_enabled', 'experimental', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
194
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED __UpperCAmelCase = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } __UpperCAmelCase = { 'allenai/led-base-16384': 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Tuple = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCAmelCase__ : int = bs[:] UpperCAmelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCamelCase ) cs.append(2**8 + n ) n += 1 UpperCAmelCase__ : Tuple = [chr(__UpperCamelCase ) for n in cs] return dict(zip(__UpperCamelCase , __UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : int = set() UpperCAmelCase__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Optional[Any] = char return pairs class __lowercase ( __lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["""input_ids""", """attention_mask"""] def __init__( self : Union[str, Any] ,A : Any ,A : Dict ,A : Optional[Any]="replace" ,A : Dict="<s>" ,A : str="</s>" ,A : str="</s>" ,A : Dict="<s>" ,A : List[str]="<unk>" ,A : Union[str, Any]="<pad>" ,A : Any="<mask>" ,A : str=False ,**A : Optional[Any] ,): '''simple docstring''' UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token UpperCAmelCase__ : Any = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token # Mask token behave like a normal word, i.e. 
        # include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        # Iteratively merge the highest-ranked adjacent symbol pair until no known merge remains.
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
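# --- Usage sketch (added for illustration) -----------------------------------
# A minimal sketch of how the `_pad` override above behaves; the checkpoint
# name is an assumption, any LED checkpoint would work the same way. Tokenize
# without padding, attach a `global_attention_mask`, then pad: the positions
# added by padding receive -1, exactly as implemented above.
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["short text", "a somewhat longer piece of text"])
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
batch = tokenizer.pad(enc, padding="longest")
# batch["global_attention_mask"][0] now ends in -1 entries for the padded slots.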
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Spherical linear interpolation between two (torch or numpy) vectors.
    inputs_are_torch = False  # default added so numpy inputs don't hit an undefined name below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
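# --- Usage sketch (added for illustration; all IDs and paths are assumptions)
# A hedged sketch of loading the pipeline above as a diffusers community
# pipeline and mixing two images. The model IDs, the community-pipeline name,
# and the image paths are placeholders; prompts are passed explicitly so no
# CoCa model is required, and a real run would need a CUDA GPU.
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel
from PIL import Image

clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16)
feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-large-patch14")
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # assumed pipeline id
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

out = pipe(
    style_image=Image.open("style.png").convert("RGB"),
    content_image=Image.open("content.png").convert("RGB"),
    style_prompt="an oil painting",
    content_prompt="a photo of a dog",
    num_inference_steps=50,
    clip_guidance_scale=100,
)
out.images[0].save("mixed.png")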
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
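# Quick sanity check against the worked example from the problem statement:
# for n = 10 the square of the sum is 3025, the sum of the squares is 385,
# and their difference is 2640.
assert solution(10) == 2640  # 3025 - 385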
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
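# --- Shape bookkeeping sketch (added for illustration) -----------------------
# Making the attention-shape arithmetic in the tests above concrete, using the
# tester defaults (image_size=10, patch_size=2, num_frames=2):
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25 spatial patches per frame
seq_length = num_frames * num_patches_per_frame + 1  # 51 tokens, including the CLS token
# Each per-frame attention map is square with side num_patches_per_frame + 1,
# which is exactly what `seq_len // num_frames + 1` computes in the assertions:
assert seq_length // num_frames + 1 == num_patches_per_frame + 1 == 26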
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
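# --- Inference sketch (added for illustration) --------------------------------
# A hedged, minimal counterpart to the integration test above, using the same
# MNLI checkpoint name taken from that test; the premise/hypothesis pair is
# arbitrary, and the label order depends on the checkpoint's `id2label` config.
import torch
from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer

tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
inputs = tokenizer(
    "A soccer game with multiple males playing.", "Some men are playing a sport.", return_tensors="pt"
)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3), one score per MNLI class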
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
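# --- Invocation sketch (added for illustration; paths are placeholders) -------
# The script is normally run from the command line via the argparse block
# above; calling the conversion function directly is equivalent:
#
# convert_checkpoint_to_pytorch(
#     tf_checkpoint_path="path/to/token_dropping_tf2_checkpoint",
#     config_path="path/to/bert_config.json",
#     pytorch_dump_path="path/to/pytorch_dump_dir",
# )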
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def a_ ( _UpperCAmelCase : int ) -> Optional[Any]: __snake_case : Any = int(_UpperCAmelCase ) __snake_case , __snake_case , __snake_case : Union[str, Any] = t // 36_00, (t // 60) % 60, t % 60 return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}''' def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[int]=3_00 ) -> Dict: # docstyle-ignore return f''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def a_ ( _UpperCAmelCase : List[str] ) -> Optional[int]: __snake_case : Any = '<table border="1" class="dataframe">\n' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: __snake_case : List[Any] = f'''{elt:.6f}''' if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else str(_UpperCAmelCase ) html_code += f''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class snake_case__ : A__ = 5 A__ = 0.2 def __init__( self : List[Any] , __a : int , __a : Optional[str] = None , __a : bool = True , __a : Optional["NotebookTrainingTracker"] = None , __a : int = 300 , ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[Any] = total __snake_case : Dict = '' if prefix is None else prefix __snake_case : Tuple = leave __snake_case : Dict = parent __snake_case : List[Any] = width __snake_case : str = None __snake_case : Tuple = None __snake_case : str = None def A_ ( self : Union[str, Any] , __a : int , __a : bool = False , __a : str = None ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = value if comment is not None: __snake_case : Optional[int] = comment if self.last_value is None: __snake_case : Dict = time.time() __snake_case : Dict = value __snake_case : Tuple = None __snake_case : Union[str, Any] = self.warmup __snake_case : List[str] = 1 self.update_bar(__a ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 __snake_case : Tuple = time.time() __snake_case : int = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: __snake_case : List[str] = self.elapsed_time / (value - self.start_value) else: __snake_case : str = None if value >= self.total: __snake_case : str = self.total __snake_case : List[str] = None if not self.leave: self.close() elif self.average_time_per_item is not None: __snake_case : Dict = self.average_time_per_item * (self.total - value) self.update_bar(__a ) __snake_case : Optional[int] = value __snake_case : Union[str, Any] = current_time if self.average_time_per_item is None: __snake_case : Optional[Any] = 1 else: __snake_case : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 ) def A_ ( self : Any , __a : List[str] , __a : Tuple=None ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = ' ' * (len(str(self.total ) ) - len(str(__a ) )) + str(__a ) if self.elapsed_time is None: __snake_case : Any = f'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: __snake_case : Optional[int] = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: __snake_case : Union[str, Any] = ( f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' f''' {format_time(self.predicted_remaining )}''' ) self.label += f''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]''' self.display() def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: __snake_case : str = disp.display(disp.HTML(self.html_code ) , display_id=__a ) else: self.output.update(disp.HTML(self.html_code ) ) def A_ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' if self.parent is None and self.output is not None: self.output.update(disp.HTML('' ) ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Optional[int] , __a : int , __a : str=None ) -> Union[str, Any]: '''simple docstring''' super().__init__(__a ) __snake_case : Tuple = None if column_names is None else [column_names] __snake_case : Any = None def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __snake_case : Any = disp.display(disp.HTML(self.html_code ) , display_id=__a ) else: self.output.update(disp.HTML(self.html_code ) ) def A_ ( self : Dict , __a : int ) -> int: '''simple docstring''' if self.inner_table is None: __snake_case : List[Any] = [list(values.keys() ), list(values.values() )] else: __snake_case : List[Any] = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__a ) __snake_case : List[str] = columns self.inner_table.append([values[c] for c in columns] ) def A_ ( self : List[str] , __a : Tuple , __a : List[str]=None , __a : Dict=300 ) -> Tuple: '''simple docstring''' __snake_case : Tuple = NotebookProgressBar(__a , prefix=__a , parent=self , width=__a ) return self.child_bar def A_ ( self : List[str] ) -> int: 
        '''simple docstring'''
        __snake_case : List[str] = None
        self.display()


class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self : Optional[int] ) -> Any:
        '''simple docstring'''
        __snake_case : Optional[int] = None
        __snake_case : Dict = None
        __snake_case : List[str] = False

    def A_ ( self : Dict , __a : List[str] , __a : Optional[Any] , __a : int , **__a : Optional[Any] ) -> int:
        '''simple docstring'''
        __snake_case : Optional[Any] = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        __snake_case : List[str] = 0
        __snake_case : str = 0
        __snake_case : Any = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss' )
        __snake_case : Optional[Any] = NotebookTrainingTracker(state.max_steps , __a )

    def A_ ( self : List[Any] , __a : Tuple , __a : str , __a : int , **__a : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        __snake_case : Optional[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
        __snake_case : List[str] = False

    def A_ ( self : Optional[int] , __a : List[Any] , __a : Optional[int] , __a : List[Any] , __a : Dict=None , **__a : Tuple ) -> Tuple:
        '''simple docstring'''
        if not has_length(__a ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                __snake_case : Optional[Any] = self.training_tracker.add_child(len(__a ) )
            else:
                __snake_case : str = NotebookProgressBar(len(__a ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )

    def A_ ( self : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : Union[str, Any] , **__a : Dict ) -> Tuple:
        '''simple docstring'''
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        __snake_case : str = None

    def A_ ( self : Any , __a : List[str] , __a : List[Any] , __a : Optional[Any] , __a : Any=None , **__a : Optional[Any] ) -> Optional[Any]:
        '''simple docstring'''
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            __snake_case : Tuple = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            __snake_case : str = state.global_step
            self.training_tracker.write_line(__a )

    def A_ ( self : str , __a : Tuple , __a : Dict , __a : Optional[int] , __a : Optional[int]=None , **__a : List[str] ) -> Tuple:
        '''simple docstring'''
        if self.training_tracker is not None:
            __snake_case : int = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    __snake_case : Union[str, Any] = log['loss']
                    break
            if self.first_column == "Epoch":
                __snake_case : List[str] = int(state.epoch )
            else:
                __snake_case : Union[str, Any] = state.global_step
            __snake_case : Union[str, Any] = 'eval'
            for k in metrics:
                if k.endswith('_loss' ):
                    __snake_case : Any = re.sub(r'\_loss$' , '' , __a )
            __snake_case : Union[str, Any] = metrics.pop('total_flos' , __a )
            __snake_case : Optional[int] = metrics.pop('epoch' , __a )
            __snake_case : List[str] = metrics.pop(f'''{metric_key_prefix}_runtime''' , __a )
            __snake_case : Dict = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , __a )
            __snake_case : Optional[Any] = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , __a )
            __snake_case : str = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , __a )
for k, v in metrics.items(): if k == f'''{metric_key_prefix}_loss''': __snake_case : Union[str, Any] = v else: __snake_case : Dict = k.split('_' ) __snake_case : Tuple = ' '.join([part.capitalize() for part in splits[1:]] ) __snake_case : List[Any] = v self.training_tracker.write_line(__a ) self.training_tracker.remove_child() __snake_case : str = None # Evaluation takes a long time so we should force the next update. __snake_case : str = True def A_ ( self : List[Any] , __a : int , __a : Optional[int] , __a : Optional[Any] , **__a : Optional[Any] ) -> int: '''simple docstring''' self.training_tracker.update( state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__a ) __snake_case : Tuple = None
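# A minimal usage sketch for the notebook progress bar defined above. This is a
# hedged illustration, not from the source: it assumes a Jupyter environment
# where IPython.display can render HTML, and it uses the names
# `NotebookProgressBar` / `update`, which are inferred from the call sites in
# the callback code above rather than from the (obfuscated) definitions.
import time

bar = NotebookProgressBar(100, prefix="Demo")
for step in range(100):
    time.sleep(0.01)  # stand-in for real work
    bar.update(step + 1, comment=f"step {step + 1}")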
124
1
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging a : Any = logging.get_logger(__name__) class _a : A = 42 A = None @staticmethod def __snake_case () -> List[str]: raise NotImplementedError def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]: raise NotImplementedError def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: raise NotImplementedError def __snake_case (self ) -> str: if not self.is_available(): raise RuntimeError( f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def __snake_case (cls ) -> Any: return f'`pip install {cls.pip_package or cls.name}`' class _a ( _lowerCAmelCase ): A = '''optuna''' @staticmethod def __snake_case () -> List[str]: return is_optuna_available() def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]: return run_hp_search_optuna(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Any: return default_hp_space_optuna(SCREAMING_SNAKE_CASE_ ) class _a ( _lowerCAmelCase ): A = '''ray''' A = '''\'ray[tune]\'''' @staticmethod def __snake_case () -> Optional[Any]: return is_ray_available() def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: return run_hp_search_ray(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: return default_hp_space_ray(SCREAMING_SNAKE_CASE_ ) class _a ( _lowerCAmelCase ): A = '''sigopt''' @staticmethod def __snake_case () -> Any: return is_sigopt_available() def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> str: return run_hp_search_sigopt(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[str]: return default_hp_space_sigopt(SCREAMING_SNAKE_CASE_ ) class _a ( _lowerCAmelCase ): A = '''wandb''' @staticmethod def __snake_case () -> List[Any]: return is_wandb_available() def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int: return run_hp_search_wandb(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Tuple: return default_hp_space_wandb(SCREAMING_SNAKE_CASE_ ) a : Any = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowerCAmelCase_ (): """simple docstring""" UpperCAmelCase_: Dict = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowerCAmelCase__ ) > 0: UpperCAmelCase_: Dict = available_backends[0].name if len(lowerCAmelCase__ ) > 1: logger.info( F'{len(lowerCAmelCase__ )} hyperparameter search backends available. Using {name} as the default.' 
) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( F' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
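# A hedged sketch of how the backend registry above is meant to be used: pick
# the first installed backend and verify it is available. It assumes at least
# one backend package (e.g. optuna) is installed, and that the selection
# helper and availability check above keep their upstream names
# `default_hp_search_backend` and `ensure_available`.
name = default_hp_search_backend()
backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
backend.ensure_available()  # raises RuntimeError with a pip hint if not installed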
556
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _a : def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=36, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, ) -> Tuple: UpperCAmelCase_: Any = parent UpperCAmelCase_: Union[str, Any] = batch_size UpperCAmelCase_: int = seq_length UpperCAmelCase_: Any = is_training UpperCAmelCase_: str = use_input_mask UpperCAmelCase_: Dict = use_token_type_ids UpperCAmelCase_: Any = use_labels UpperCAmelCase_: Any = vocab_size UpperCAmelCase_: Dict = embedding_size UpperCAmelCase_: Dict = hidden_size UpperCAmelCase_: Dict = num_hidden_layers UpperCAmelCase_: Dict = num_hidden_groups UpperCAmelCase_: int = num_attention_heads UpperCAmelCase_: List[str] = intermediate_size UpperCAmelCase_: Any = hidden_act UpperCAmelCase_: Dict = hidden_dropout_prob UpperCAmelCase_: Any = attention_probs_dropout_prob UpperCAmelCase_: Tuple = max_position_embeddings UpperCAmelCase_: Tuple = type_vocab_size UpperCAmelCase_: List[Any] = type_sequence_label_size UpperCAmelCase_: Any = initializer_range UpperCAmelCase_: int = num_labels UpperCAmelCase_: Optional[int] = num_choices UpperCAmelCase_: Tuple = scope def __snake_case (self ) -> List[Any]: UpperCAmelCase_: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase_: Tuple = None if self.use_input_mask: UpperCAmelCase_: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_: Dict = None if self.use_token_type_ids: UpperCAmelCase_: int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) UpperCAmelCase_: Tuple = None UpperCAmelCase_: Dict = None UpperCAmelCase_: Tuple = None if self.use_labels: UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase_: Dict = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) UpperCAmelCase_: List[Any] = ids_tensor([self.batch_size], self.num_choices ) UpperCAmelCase_: Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case (self ) -> Optional[Any]: return AlbertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: UpperCAmelCase_: int = AlbertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: List[Any] = model(SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str: UpperCAmelCase_: Any = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: List[Any] = model( SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, sentence_order_label=SCREAMING_SNAKE_CASE_, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCAmelCase_: Optional[int] = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: Optional[Any] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple: UpperCAmelCase_: Optional[Any] = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: Any = model( SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, start_positions=SCREAMING_SNAKE_CASE_, end_positions=SCREAMING_SNAKE_CASE_, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any: UpperCAmelCase_: Optional[Any] = self.num_labels UpperCAmelCase_: List[Any] = 
AlbertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCAmelCase_: List[str] = self.num_labels UpperCAmelCase_: Union[str, Any] = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCAmelCase_: Dict = self.num_choices UpperCAmelCase_: Dict = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCAmelCase_: str = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() UpperCAmelCase_: str = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() UpperCAmelCase_: List[Any] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() UpperCAmelCase_: Union[str, Any] = model( SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def __snake_case (self ) -> Union[str, Any]: UpperCAmelCase_: Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ): List[str] = config_and_inputs UpperCAmelCase_: Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): A = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A = ( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) A = True def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Any: UpperCAmelCase_: Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_ ): UpperCAmelCase_: Dict = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[int] = torch.zeros( self.model_tester.batch_size, 
dtype=torch.long, device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def __snake_case (self ) -> Optional[Any]: UpperCAmelCase_: Any = AlbertModelTester(self ) UpperCAmelCase_: Union[str, Any] = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, hidden_size=37 ) def __snake_case (self ) -> List[Any]: self.config_tester.run_common_tests() def __snake_case (self ) -> List[Any]: UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Any: UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Optional[Any]: UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> str: UpperCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Any: UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> str: UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def __snake_case (self ) -> Dict: UpperCAmelCase_: Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_: Union[str, Any] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def __snake_case (self ) -> List[Any]: for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_: Dict = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_torch class _a ( unittest.TestCase ): @slow def __snake_case (self ) -> Dict: UpperCAmelCase_: str = AlbertModel.from_pretrained("""albert-base-v2""" ) UpperCAmelCase_: List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) UpperCAmelCase_: List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )[0] UpperCAmelCase_: List[str] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape, SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
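# Hedged illustration of what the tester above exercises: build a tiny
# AlbertModel from a synthetic config and run one forward pass (requires
# torch and transformers installed; the sizes mirror the tester defaults).
import torch
from transformers import AlbertConfig, AlbertModel

tiny_config = AlbertConfig(
    vocab_size=99, embedding_size=16, hidden_size=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
)
model = AlbertModel(tiny_config).eval()
input_ids = torch.randint(0, 99, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 7, 32])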
556
1
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=False ) -> List[str]: try: lowerCamelCase__ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowerCamelCase__ : Optional[Any] = default else: # KEY is set, convert it to True or False. try: lowerCamelCase__ : Any = strtobool(SCREAMING_SNAKE_CASE_ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value _UpperCAmelCase : Union[str, Any] = parse_flag_from_env("""RUN_SLOW""", default=False) _UpperCAmelCase : Tuple = parse_flag_from_env("""RUN_REMOTE""", default=False) _UpperCAmelCase : Optional[Any] = parse_flag_from_env("""RUN_LOCAL""", default=True) _UpperCAmelCase : Tuple = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression _UpperCAmelCase : int = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") _UpperCAmelCase : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") _UpperCAmelCase : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio _UpperCAmelCase : Optional[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam _UpperCAmelCase : Tuple = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility _UpperCAmelCase : int = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows _UpperCAmelCase : Any = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any: try: import faiss # noqa except ImportError: lowerCamelCase__ : Union[str, Any] = unittest.skip('test requires faiss' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Dict: try: import regex # noqa except ImportError: lowerCamelCase__ : Dict = unittest.skip('test requires regex' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int: try: import elasticsearch # noqa except ImportError: lowerCamelCase__ : List[str] = unittest.skip('test requires elasticsearch' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]: try: import sqlalchemy # noqa except ImportError: lowerCamelCase__ : Tuple = unittest.skip('test requires sqlalchemy' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[int]: if not config.TORCH_AVAILABLE: lowerCamelCase__ 
: int = unittest.skip('test requires PyTorch' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Dict: if not config.TF_AVAILABLE: lowerCamelCase__ : List[str] = unittest.skip('test requires TensorFlow' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any: if not config.JAX_AVAILABLE: lowerCamelCase__ : List[str] = unittest.skip('test requires JAX' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any: if not config.PIL_AVAILABLE: lowerCamelCase__ : Dict = unittest.skip('test requires Pillow' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]: try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]: try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]: try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Dict: def _require_spacy_model(_UpperCAmelCase ): try: import spacy # noqa F401 spacy.load(SCREAMING_SNAKE_CASE_ ) except ImportError: return unittest.skip('test requires spacy' )(SCREAMING_SNAKE_CASE_ ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ ) else: return test_case return _require_spacy_model def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]: try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Dict: try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str: if not _run_slow_tests or _run_slow_tests == 0: lowerCamelCase__ : List[str] = unittest.skip('test is slow' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]: if not _run_local_tests or _run_local_tests == 0: lowerCamelCase__ : List[str] = unittest.skip('test is local' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Dict: if not _run_packaged_tests or _run_packaged_tests == 0: lowerCamelCase__ : List[Any] = unittest.skip('test is packaged' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]: if not _run_remote_tests or _run_remote_tests == 0: lowerCamelCase__ : Optional[Any] = unittest.skip('test requires remote' )(SCREAMING_SNAKE_CASE_ ) return test_case def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase ) -> Tuple: def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(SCREAMING_SNAKE_CASE_ ) and name.startswith('test' ): for decorator in decorators: lowerCamelCase__ : Union[str, Any] = decorator(SCREAMING_SNAKE_CASE_ ) setattr(cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return cls return decorate class lowerCAmelCase ( lowercase__ ): pass class lowerCAmelCase ( lowercase__ ): UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 UpperCAmelCase__ = 2 @contextmanager def 
SCREAMING_SNAKE_CASE ( _UpperCAmelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCAmelCase=1e-16 ) -> Dict: lowerCamelCase__ : Any = requests.Session().request def timeout_request(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): # Change the url to an invalid url so that the connection hangs lowerCamelCase__ : Optional[int] = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" ) lowerCamelCase__ : Tuple = timeout try: return online_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowerCamelCase__ : List[Any] = url lowerCamelCase__ : List[str] = e.args[0] lowerCamelCase__ : Tuple = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),) lowerCamelCase__ : Tuple = (max_retry_error,) raise def raise_connection_error(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): raise requests.ConnectionError('Offline mode is enabled.' , request=SCREAMING_SNAKE_CASE_ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , SCREAMING_SNAKE_CASE_ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , SCREAMING_SNAKE_CASE_ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]: lowerCamelCase__ : str = str(Path().resolve() ) with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) as tmp_dir: try: os.chdir(SCREAMING_SNAKE_CASE_ ) yield finally: os.chdir(SCREAMING_SNAKE_CASE_ ) @contextmanager def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: import gc gc.collect() lowerCamelCase__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def SCREAMING_SNAKE_CASE ( ) -> Tuple: import gc gc.collect() lowerCamelCase__ : Optional[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: return deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any: import decorator from requests.exceptions import HTTPError def _wrapper(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ): try: return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) except HTTPError as err: if str(SCREAMING_SNAKE_CASE_ ).startswith('500' ) or str(SCREAMING_SNAKE_CASE_ ).startswith('502' ): pytest.xfail(str(SCREAMING_SNAKE_CASE_ ) ) raise err return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase : def __init__( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ) -> Dict: lowerCamelCase__ : Any = returncode lowerCamelCase__ : List[Any] = stdout lowerCamelCase__ : Union[str, Any] = stderr async def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: while True: lowerCamelCase__ : Tuple = await stream.readline() if line: callback(SCREAMING_SNAKE_CASE_ ) else: break async def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> Optional[int]: if echo: print('\nRunning: ' , ' '.join(SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase__ : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE_ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowerCamelCase__ : Optional[int] = [] lowerCamelCase__ : Optional[Any] = [] def tee(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="" ): lowerCamelCase__ : int = line.decode('utf-8' ).rstrip() sink.append(SCREAMING_SNAKE_CASE_ ) if not quiet: print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , file=SCREAMING_SNAKE_CASE_ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _UpperCAmelCase : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda _UpperCAmelCase : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stderr , label='stderr:' ) ), ] , timeout=SCREAMING_SNAKE_CASE_ , ) return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=180 , _UpperCAmelCase=False , _UpperCAmelCase=True ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = asyncio.get_event_loop() lowerCamelCase__ : int = loop.run_until_complete( _stream_subprocess(SCREAMING_SNAKE_CASE_ , env=SCREAMING_SNAKE_CASE_ , stdin=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ , echo=SCREAMING_SNAKE_CASE_ ) ) lowerCamelCase__ : Any = ' '.join(SCREAMING_SNAKE_CASE_ ) if result.returncode > 0: lowerCamelCase__ : Optional[Any] = '\n'.join(result.stderr ) raise RuntimeError( F"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"""\'{cmd_str}\' produced no output.""" ) return result def SCREAMING_SNAKE_CASE ( ) -> str: lowerCamelCase__ : List[Any] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) lowerCamelCase__ : List[str] = re.sub(r'^gw' , '' , SCREAMING_SNAKE_CASE_ , 0 , re.M ) return int(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE ( ) -> Any: lowerCamelCase__ : List[str] = 2_9500 lowerCamelCase__ : Union[str, Any] = pytest_xdist_worker_id() return port + uniq_delta
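# A hedged example of the offline-simulation helper above inside a pytest
# test. It assumes the contextmanager defined above carries its upstream
# public name `offline`; in CONNECTION_FAILS mode, any requests call going
# through Session.send raises ConnectionError.
import pytest
import requests


def test_network_calls_fail_offline():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")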
715
import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : str ) -> Union[str, Any]: lowerCamelCase__ : Optional[int] = 'laion/clap-htsat-unfused' lowerCamelCase__ : List[Any] = tempfile.mkdtemp() def A_ ( self : Optional[int] , **UpperCAmelCase : int ) -> Optional[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase ) def A_ ( self : Union[str, Any] , **UpperCAmelCase : Any ) -> List[str]: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase ) def A_ ( self : List[str] ) -> Dict: shutil.rmtree(self.tmpdirname ) def A_ ( self : Optional[Any] ) -> Optional[Any]: lowerCamelCase__ : Tuple = self.get_tokenizer() lowerCamelCase__ : Dict = self.get_feature_extractor() lowerCamelCase__ : Dict = ClapProcessor(tokenizer=UpperCAmelCase , feature_extractor=UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : List[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase ) def A_ ( self : str ) -> int: lowerCamelCase__ : Optional[Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : Dict = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCamelCase__ : Dict = self.get_feature_extractor(do_normalize=UpperCAmelCase , padding_value=1.0 ) lowerCamelCase__ : str = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase ) def A_ ( self : List[Any] ) -> int: lowerCamelCase__ : Any = self.get_feature_extractor() lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase , feature_extractor=UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = floats_list((3, 1000) ) lowerCamelCase__ : Tuple = feature_extractor(UpperCAmelCase , return_tensors='np' ) lowerCamelCase__ : Tuple = processor(audios=UpperCAmelCase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : List[str] ) -> Tuple: lowerCamelCase__ : Dict = self.get_feature_extractor() lowerCamelCase__ : List[Any] = self.get_tokenizer() lowerCamelCase__ : Any = ClapProcessor(tokenizer=UpperCAmelCase , feature_extractor=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = 'This is a test string' lowerCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase ) lowerCamelCase__ : Any = tokenizer(UpperCAmelCase ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : Optional[int] ) -> Tuple: lowerCamelCase__ : Tuple = self.get_feature_extractor() lowerCamelCase__ : List[str] = self.get_tokenizer() lowerCamelCase__ : Optional[int] = ClapProcessor(tokenizer=UpperCAmelCase , feature_extractor=UpperCAmelCase ) lowerCamelCase__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase ) lowerCamelCase__ : Tuple = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def A_ ( self : Optional[Any] ) -> int: lowerCamelCase__ : str = self.get_feature_extractor() lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : Dict = ClapProcessor(tokenizer=UpperCAmelCase , feature_extractor=UpperCAmelCase ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
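# Hedged sketch of the text/audio round-trip the tests above exercise. It
# downloads the laion/clap-htsat-unfused checkpoint on first use and feeds
# one second of synthetic audio at the extractor's expected 48 kHz rate.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000).astype(np.float32)  # one second of noise
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. attention_mask, input_features, input_ids, ...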
188
0
def solution(n=2000000):
    """Return the sum of all primes below n (Project Euler problem 10)."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # mark all multiples of the prime i as composite
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f'''{solution() = }''')
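# Quick sanity check of the sieve against a brute-force primality test on a
# small bound (pure stdlib, independent of the sieve above).
def is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))


assert solution(100) == sum(p for p in range(100) if is_prime(p)) == 1060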
613
"""simple docstring""" from __future__ import annotations from math import pi def _lowerCamelCase( a , a , a ): if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if inductance < 0: raise ValueError("Inductance cannot be negative" ) if frequency < 0: raise ValueError("Frequency cannot be negative" ) if reactance < 0: raise ValueError("Inductive reactance cannot be negative" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
528
0
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCAmelCase ( snake_case ): # warning at import time warnings.warn( """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """ """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , snake_case , )
719
"""simple docstring""" def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> list: """simple docstring""" _UpperCAmelCase = False while is_sorted is False: # Until all the indices are traversed keep looping _UpperCAmelCase = True for i in range(0,len(SCREAMING_SNAKE_CASE ) - 1,2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: _UpperCAmelCase , _UpperCAmelCase = input_list[i + 1], input_list[i] # swapping if elements not in order _UpperCAmelCase = False for i in range(1,len(SCREAMING_SNAKE_CASE ) - 1,2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: _UpperCAmelCase , _UpperCAmelCase = input_list[i + 1], input_list[i] # swapping if elements not in order _UpperCAmelCase = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') lowerCAmelCase_ = [int(x) for x in input().split()] # inputing elements of the list in one line lowerCAmelCase_ = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
494
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' @property def _UpperCAmelCase ( self : List[Any] ): torch.manual_seed(0 ) A__ : Union[str, Any] =UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def _UpperCAmelCase ( self : Union[str, Any] ): A__ : int =self.dummy_uncond_unet A__ : Any =PNDMScheduler() A__ : str =PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pndm.to(__UpperCAmelCase ) pndm.set_progress_bar_config(disable=__UpperCAmelCase ) A__ : List[Any] =torch.manual_seed(0 ) A__ : Optional[int] =pndm(generator=__UpperCAmelCase , num_inference_steps=20 , output_type="numpy" ).images A__ : str =torch.manual_seed(0 ) A__ : Tuple =pndm(generator=__UpperCAmelCase , num_inference_steps=20 , output_type="numpy" , return_dict=__UpperCAmelCase )[0] A__ : Optional[Any] =image[0, -3:, -3:, -1] A__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ : List[str] =np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : int ): A__ : Dict ="google/ddpm-cifar10-32" A__ : List[str] =UNetaDModel.from_pretrained(__UpperCAmelCase ) A__ : int =PNDMScheduler() A__ : Union[str, Any] =PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pndm.to(__UpperCAmelCase ) pndm.set_progress_bar_config(disable=__UpperCAmelCase ) A__ : Optional[int] =torch.manual_seed(0 ) A__ : Any =pndm(generator=__UpperCAmelCase , output_type="numpy" ).images A__ : List[str] =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ : Tuple =np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
656
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase__ = logging.getLogger(__name__) def _a ( a :Union[str, Any] , a :Tuple ) -> Optional[Any]: # save results if os.path.exists(a ): if os.path.exists(os.path.join(a , '''config.json''' ) ) and os.path.isfile( os.path.join(a , '''config.json''' ) ): os.remove(os.path.join(a , '''config.json''' ) ) if os.path.exists(os.path.join(a , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(a , '''pytorch_model.bin''' ) ): os.remove(os.path.join(a , '''pytorch_model.bin''' ) ) else: os.makedirs(a ) model.save_pretrained(a ) def _a ( a :List[Any] , a :Union[str, Any]=False ) -> int: a = 2 if unlogit: a = torch.pow(a , a ) a = p * torch.log(a ) a = 0 return -plogp.sum(dim=-1 ) def _a ( a :List[str] ) -> Union[str, Any]: logger.info('''lv, h >\t''' + '''\t'''.join(F"""{x + 1}""" for x in range(len(a ) ) ) ) for row in range(len(a ) ): if tensor.dtype != torch.long: logger.info(F"""layer {row + 1}:\t""" + '''\t'''.join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) ) else: logger.info(F"""layer {row + 1}:\t""" + '''\t'''.join(F"""{x:d}""" for x in tensor[row].cpu().data ) ) def _a ( a :Optional[int] , a :Dict , a :Tuple , a :Tuple=True , a :Union[str, Any]=True , a :str=None , a :Union[str, Any]=False ) -> int: a , a = model.config.num_hidden_layers, model.config.num_attention_heads a = torch.zeros(a , a ).to(args.device ) a = torch.zeros(a , a ).to(args.device ) if head_mask is None: a = torch.ones(a , a ).to(args.device ) head_mask.requires_grad_(requires_grad=a ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: a = None a = 0.0 a = 0.0 for step, inputs in enumerate(tqdm(a , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): a = tuple(t.to(args.device ) for t in inputs ) ((a) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) a = model(a , labels=a , head_mask=a ) # (loss), lm_logits, presents, (all hidden_states), (attentions) a , a , a = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(a ): a = entropy(attn.detach() , a ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(a ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: a = 2 a = torch.pow(torch.pow(a , a ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: a = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(a ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(a ) logger.info('''Head ranked by importance scores''' ) a = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) a = torch.arange( 
        head_importance.numel() , device=args.device )
    a = head_ranks.view_as(a )
    print_ad_tensor(a )
    return attn_entropy, head_importance, total_loss


def _a ( a :Optional[Any] , a :List[Any] , a :str ) -> Optional[Any]:
    a , a , a = compute_heads_importance(a , a , a , compute_entropy=a )
    a = 1 / loss  # instead of downstream score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , a , original_score * args.masking_threshold )
    a = torch.ones_like(a )
    a = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    a = original_score
    while current_score >= original_score * args.masking_threshold:
        a = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        a = float('''Inf''' )
        a = head_importance.view(-1 ).sort()[1]
        if len(a ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        a = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        a = new_head_mask.view(-1 )
        a = 0.0
        a = new_head_mask.view_as(a )
        a = new_head_mask.clone().detach()
        print_ad_tensor(a )
        # Compute metric and head importance again
        a , a , a = compute_heads_importance(
            a , a , a , compute_entropy=a , head_mask=a )
        a = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , a , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(a )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask


def _a ( a :List[Any] , a :Optional[int] , a :Tuple , a :List[str] ) -> List[str]:
    a = datetime.now()
    a , a , a = compute_heads_importance(
        a , a , a , compute_entropy=a , compute_importance=a , head_mask=a )
    a = 1 / loss
    a = datetime.now() - before_time
    a = sum(p.numel() for p in model.parameters() )
    a = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(a ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(a , a ):
            a = [
                v,
            ]
    assert sum(len(a ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(a )
    a = sum(p.numel() for p in model.parameters() )
    a = datetime.now()
    a , a , a = compute_heads_importance(
        a , a , a , compute_entropy=a , compute_importance=a , head_mask=a , actually_pruned=a , )
    a = 1 / loss
    a = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , a , a , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , a , a )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(a , args.output_dir )


def _a ( ) -> int:
    a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=a , type=a , required=a , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=a , type=a , required=a , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=a , type=a , required=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=a , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=a , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=a , type=a , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=a , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask heads until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=a , help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=a , help='''Fraction of heads to mask at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=a , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=a , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=a , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=a , default=42 )
    parser.add_argument('''--local_rank''' , type=a , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=a , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=a , default='''''' , help='''Can be used for distant debugging.''' )
    a = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        a = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        a = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        a = torch.device('''cuda''' , args.local_rank )
        a = 1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    a = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        a = nn.parallel.DistributedDataParallel(
            a , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=a )
    elif args.n_gpu > 1:
        a = nn.DataParallel(a )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=a )
    torch.save(a , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , a )
    # Prepare dataset
    a = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    a = (torch.from_numpy(a ),)
    a = TensorDataset(*a )
    a = RandomSampler(a )
    a = DataLoader(a , sampler=a , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(a , a , a )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        a = mask_heads(a , a , a )
        prune_heads(a , a , a , a )


if __name__ == "__main__":
    main()
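# A hypothetical command line for the head-masking/pruning script above. The
# script file name and the data path are assumptions, not from the source;
# --data_dir must point at a file of whitespace-separated token ids loadable
# by np.loadtxt.
#
#   python bertology_gpt2.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./tokenized_ids.txt \
#       --output_dir ./gpt2_pruned \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1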
117
0
"""simple docstring""" import baseaa def __lowerCAmelCase ( lowercase : str ) -> bytes: """simple docstring""" return baseaa.aaaencode(string.encode("utf-8" ) ) def __lowerCAmelCase ( lowercase : bytes ) -> str: """simple docstring""" return baseaa.aaadecode(lowercase ).decode("utf-8" ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _lowerCAmelCase ( unittest.TestCase ): def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=4 , ) -> Dict: '''simple docstring''' snake_case : Any = parent snake_case : int = batch_size snake_case : Optional[Any] = seq_length snake_case : Dict = is_training snake_case : str = use_attention_mask snake_case : List[Any] = use_token_type_ids snake_case : Tuple = use_labels snake_case : str = vocab_size snake_case : Optional[Any] = hidden_size snake_case : Optional[Any] = num_hidden_layers snake_case : Dict = num_attention_heads snake_case : int = intermediate_size snake_case : Union[str, Any] = hidden_act snake_case : Optional[Any] = hidden_dropout_prob snake_case : int = attention_probs_dropout_prob snake_case : Optional[int] = max_position_embeddings snake_case : Dict = type_vocab_size snake_case : str = type_sequence_label_size snake_case : List[Any] = initializer_range snake_case : Dict = num_choices def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : Optional[int] = None if self.use_attention_mask: snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case : List[Any] = None if self.use_token_type_ids: snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case : Union[str, Any] = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' snake_case : List[str] = self.prepare_config_and_inputs() snake_case ,snake_case ,snake_case ,snake_case : List[Any] = config_and_inputs snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def lowerCamelCase ( self ) -> Any: '''simple docstring''' snake_case : List[str] = self.prepare_config_and_inputs() snake_case ,snake_case ,snake_case ,snake_case : List[Any] = config_and_inputs snake_case : int = True snake_case : 
Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _lowerCAmelCase ( snake_case_ , unittest.TestCase ): __UpperCAmelCase : int = True __UpperCAmelCase : Optional[Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : Dict = FlaxRobertaModelTester(self ) @slow def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' for model_class_name in self.all_model_classes: snake_case : Dict = model_class_name.from_pretrained("roberta-base" , from_pt=UpperCamelCase__ ) snake_case : List[str] = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase__ )
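The slow test above reduces to a one-line usage pattern: from_pt=True converts the PyTorch roberta-base checkpoint into Flax weights on the fly. A minimal sketch of the same call outside the test harness:

# Minimal sketch mirroring the slow test: load PyTorch weights into Flax and
# run a single dummy token through the encoder.
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))  # one batch, one token
print(outputs.last_hidden_state.shape)  # (1, 1, 768) for roberta-base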
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = { 'configuration_roberta_prelayernorm': [ 'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaPreLayerNormConfig', 'RobertaPreLayerNormOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaPreLayerNormForCausalLM', 'RobertaPreLayerNormForMaskedLM', 'RobertaPreLayerNormForMultipleChoice', 'RobertaPreLayerNormForQuestionAnswering', 'RobertaPreLayerNormForSequenceClassification', 'RobertaPreLayerNormForTokenClassification', 'RobertaPreLayerNormModel', 'RobertaPreLayerNormPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaPreLayerNormForCausalLM', 'TFRobertaPreLayerNormForMaskedLM', 'TFRobertaPreLayerNormForMultipleChoice', 'TFRobertaPreLayerNormForQuestionAnswering', 'TFRobertaPreLayerNormForSequenceClassification', 'TFRobertaPreLayerNormForTokenClassification', 'TFRobertaPreLayerNormMainLayer', 'TFRobertaPreLayerNormModel', 'TFRobertaPreLayerNormPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'FlaxRobertaPreLayerNormForCausalLM', 'FlaxRobertaPreLayerNormForMaskedLM', 'FlaxRobertaPreLayerNormForMultipleChoice', 'FlaxRobertaPreLayerNormForQuestionAnswering', 'FlaxRobertaPreLayerNormForSequenceClassification', 'FlaxRobertaPreLayerNormForTokenClassification', 'FlaxRobertaPreLayerNormModel', 'FlaxRobertaPreLayerNormPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, 
FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
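The file above follows the standard transformers lazy-import pattern: _import_structure lists every public symbol per backend, the imports under TYPE_CHECKING exist only for static analysis, and at runtime the module object is replaced by a _LazyModule that imports a submodule the first time one of its symbols is touched. A stripped-down sketch of that mechanism (class and attribute names here are illustrative, not the real implementation):

# Stripped-down sketch of the lazy-import mechanism (illustrative names only).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [symbols]} into {symbol: submodule}.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # The heavy submodule is imported only on first attribute access.
        submodule = importlib.import_module(
            "." + self._symbol_to_module[name], self.__name__
        )
        return getattr(submodule, name)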
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase_ = 16 lowercase_ = 32 def UpperCAmelCase ( _lowercase : Accelerator , _lowercase : int = 1_6 , _lowercase : str = "bert-base-cased" ) -> Dict: """simple docstring""" lowerCAmelCase_ = AutoTokenizer.from_pretrained(_lowercase ) lowerCAmelCase_ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_lowercase : Optional[int] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ = datasets.map( _lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_lowercase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase_ = DataLoader( tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) lowerCAmelCase_ = DataLoader( tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader def UpperCAmelCase ( _lowercase : int , _lowercase : Any , _lowercase : Any , _lowercase : str ) -> List[Any]: """simple docstring""" model.eval() lowerCAmelCase_ = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ = model(**_lowercase ) lowerCAmelCase_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_lowercase ) - 1: lowerCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_lowercase , references=_lowercase , ) lowerCAmelCase_ = metric.compute() return eval_metric["accuracy"] def UpperCAmelCase ( _lowercase : Dict , _lowercase : Optional[Any] ) -> Dict: """simple docstring""" lowerCAmelCase_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ = config['''lr'''] lowerCAmelCase_ = int(config['''num_epochs'''] ) lowerCAmelCase_ = int(config['''seed'''] ) lowerCAmelCase_ = int(config['''batch_size'''] ) lowerCAmelCase_ = args.model_name_or_path set_seed(_lowercase ) lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(_lowercase , _lowercase , _lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase ) # Instantiate optimizer lowerCAmelCase_ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=_lowercase ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowerCAmelCase_ = 1 lowerCAmelCase_ = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , ) else: lowerCAmelCase_ = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ = 0 lowerCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' ) lowerCAmelCase_ = num_epochs if args.partial_train_epoch is not None: lowerCAmelCase_ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowerCAmelCase_ = args.resume_from_checkpoint.split('''epoch_''' )[1] lowerCAmelCase_ = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCAmelCase_ = int(_lowercase ) + 1 lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase ) accelerator.print('''resumed checkpoint performance:''' , _lowercase ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f: lowerCAmelCase_ = json.load(_lowercase ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCAmelCase_ = {} for epoch in range(_lowercase , _lowercase ): model.train() for step, batch in enumerate(_lowercase ): lowerCAmelCase_ = model(**_lowercase ) lowerCAmelCase_ = outputs.loss lowerCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCAmelCase_ = F"""epoch_{epoch}""" lowerCAmelCase_ = os.path.join(args.output_dir , _lowercase ) accelerator.save_state(_lowercase ) lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase ) lowerCAmelCase_ = accuracy lowerCAmelCase_ = lr_scheduler.get_lr()[0] lowerCAmelCase_ = optimizer.param_groups[0]['''lr'''] lowerCAmelCase_ = epoch lowerCAmelCase_ = overall_step accelerator.print(F"""epoch {epoch}:""" , _lowercase ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f: json.dump(_lowercase , _lowercase ) def UpperCAmelCase ( ) -> Optional[Any]: """simple docstring""" lowerCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , ) parser.add_argument( '''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
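The resume logic above is built on two Accelerate calls: accelerator.save_state serializes the model, optimizer, scheduler and RNG states into a folder, and accelerator.load_state restores them in place. A minimal sketch of that pair (the path is a placeholder):

# Minimal sketch (placeholder path): the save_state/load_state pair the
# checkpoint-resumption logic above is built on.
from accelerate import Accelerator

accelerator = Accelerator()
# model, optimizer, dataloader, scheduler = accelerator.prepare(...)
accelerator.save_state("output/epoch_0")  # model + optimizer + scheduler + RNG states
# ...later, after re-preparing identical objects:
accelerator.load_state("output/epoch_0")  # restores everything in place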
import math
import unittest


def is_prime(number: int) -> bool:
    """Trial division using the fact that primes > 3 have the form 6k +/- 1."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class TestIsPrime(unittest.TestCase):
    def test_primes(self):
        for prime in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            self.assertTrue(is_prime(prime))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
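The 6k +/- 1 loop works because every integer is one of 6k, 6k +/- 1, 6k + 2, 6k + 3 or 6k + 4, and all but 6k +/- 1 are divisible by 2 or 3; after the early exits, only two candidates per block of six need trial division. A quick sanity check against a brute-force reference:

# Sanity check of is_prime against a brute-force reference.
def is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))

assert all(is_prime(n) == is_prime_naive(n) for n in range(2, 500))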
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCamelCase__ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCamelCase : def __init__( self: Dict,A_: List[str],A_: List[Any]=3,A_: Union[str, Any]=32,A_: List[str]=3,A_: List[str]=10,A_: Any=[10, 20, 30, 40],A_: List[str]=[1, 1, 2, 1],A_: Optional[int]=True,A_: Tuple=True,A_: str="relu",A_: Any=3,A_: Tuple=None,): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = num_channels __UpperCamelCase = embeddings_size __UpperCamelCase = hidden_sizes __UpperCamelCase = depths __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_act __UpperCamelCase = num_labels __UpperCamelCase = scope __UpperCamelCase = len(A_ ) def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size],self.num_labels ) __UpperCamelCase = self.get_config() return config, pixel_values, labels def snake_case_ ( self: Dict ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,image_size=self.image_size,) def snake_case_ ( self: str,A_: Union[str, Any],A_: int,A_: str ): '''simple docstring''' __UpperCamelCase = TFResNetModel(config=A_ ) __UpperCamelCase = model(A_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),) def snake_case_ ( self: Tuple,A_: Dict,A_: Union[str, Any],A_: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFResNetForImageClassification(A_ ) __UpperCamelCase = model(A_,labels=A_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = config_and_inputs __UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __lowerCamelCase (_a , _a , unittest.TestCase ): _lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _lowercase = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False _lowercase = False def snake_case_ ( self: List[str] ): '''simple docstring''' __UpperCamelCase = TFResNetModelTester(self ) __UpperCamelCase = 
ConfigTester(self,config_class=A_,has_text_modality=A_ ) def snake_case_ ( self: Dict ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ ( self: Tuple ): '''simple docstring''' return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def snake_case_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def snake_case_ ( self: List[str] ): '''simple docstring''' pass def snake_case_ ( self: List[Any] ): '''simple docstring''' __UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(A_ ) __UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1],A_ ) def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def snake_case_ ( self: Dict ): '''simple docstring''' def check_hidden_states_output(A_: Union[str, Any],A_: Optional[Any],A_: Union[str, Any] ): __UpperCamelCase = model_class(A_ ) __UpperCamelCase = model(**self._prepare_for_class(A_,A_ ) ) __UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(A_ ),expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],) __UpperCamelCase, __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: __UpperCamelCase = layer_type __UpperCamelCase = True check_hidden_states_output(A_,A_,A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCamelCase = True check_hidden_states_output(A_,A_,A_ ) def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def snake_case_ ( self: List[str] ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFResNetModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _A ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __lowerCamelCase (unittest.TestCase ): @cached_property def snake_case_ ( self: List[str] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case_ ( self: Optional[Any] ): '''simple docstring''' __UpperCamelCase = 
TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=A_,return_tensors='tf' ) # forward pass __UpperCamelCase = model(**A_ ) # verify the logits __UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape,A_ ) __UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(),A_,atol=1E-4 ) )
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCamelCase (_a , unittest.TestCase ): _lowercase = KandinskyInpaintPipeline _lowercase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] _lowercase = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] _lowercase = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] _lowercase = False @property def snake_case_ ( self: int ): '''simple docstring''' return 32 @property def snake_case_ ( self: str ): '''simple docstring''' return 32 @property def snake_case_ ( self: Tuple ): '''simple docstring''' return self.time_input_dim @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' return 100 @property def snake_case_ ( self: str ): '''simple docstring''' __UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = MCLIPConfig( numDims=self.cross_attention_dim,transformerDimensions=self.text_embedder_hidden_size,hidden_size=self.text_embedder_hidden_size,intermediate_size=37,num_attention_heads=4,num_hidden_layers=5,vocab_size=1005,) __UpperCamelCase = MultilingualCLIP(A_ ) __UpperCamelCase = text_encoder.eval() return text_encoder @property def snake_case_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __UpperCamelCase = UNetaDConditionModel(**A_ ) return model @property def snake_case_ ( self: str ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property 
def snake_case_ ( self: str ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def snake_case_ ( self: Dict ): '''simple docstring''' __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = self.dummy_tokenizer __UpperCamelCase = self.dummy_unet __UpperCamelCase = self.dummy_movq __UpperCamelCase = DDIMScheduler( num_train_timesteps=1000,beta_schedule='linear',beta_start=0.0_0_0_8_5,beta_end=0.0_1_2,clip_sample=A_,set_alpha_to_one=A_,steps_offset=1,prediction_type='epsilon',thresholding=A_,) __UpperCamelCase = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def snake_case_ ( self: Tuple,A_: Optional[int],A_: Dict=0 ): '''simple docstring''' __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = floats_tensor((1, self.cross_attention_dim),rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image __UpperCamelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(A_ ) ).to(A_ ) __UpperCamelCase = image.cpu().permute(0,2,3,1 )[0] __UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) ) # create mask __UpperCamelCase = np.ones((64, 64),dtype=np.floataa ) __UpperCamelCase = 0 if str(A_ ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(A_ ) else: __UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __UpperCamelCase = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = 'cpu' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**A_ ) __UpperCamelCase = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) ) __UpperCamelCase = output.images __UpperCamelCase = pipe( **self.get_dummy_inputs(A_ ),return_dict=A_,)[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __UpperCamelCase = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def snake_case_ ( self: Optional[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCamelCase (unittest.TestCase ): def snake_case_ ( self: Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __UpperCamelCase 
= np.ones((768, 768),dtype=np.float32 ) __UpperCamelCase = 0 __UpperCamelCase = 'a hat' __UpperCamelCase = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior',torch_dtype=torch.float16 ) pipe_prior.to(A_ ) __UpperCamelCase = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint',torch_dtype=torch.float16 ) __UpperCamelCase = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) __UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 ) __UpperCamelCase, __UpperCamelCase = pipe_prior( A_,generator=A_,num_inference_steps=5,negative_prompt='',).to_tuple() __UpperCamelCase = pipeline( A_,image=A_,mask_image=A_,image_embeds=A_,negative_image_embeds=A_,generator=A_,num_inference_steps=100,height=768,width=768,output_type='np',) __UpperCamelCase = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_,A_ )
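Outside the test harness the same two-stage flow applies: the prior pipeline maps a prompt to image embeddings, and the inpainting pipeline consumes them together with an init image and a float mask. A condensed sketch of the slow test above (checkpoints and image URL are the ones in the test; the mask region and mask convention are assumptions, and a CUDA device is assumed):

# Condensed sketch of the two-stage Kandinsky inpainting flow tested above.
# Checkpoints/URL come from the test; mask region/convention are assumptions.
import numpy as np
import torch
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
    "/resolve/main/kandinsky/cat.png"
)
image_emb, zero_image_emb = prior("a hat", negative_prompt="").to_tuple()
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # assumed region and convention for the repaint area

image = pipe(
    "a hat",
    image=init_image,
    mask_image=mask,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    output_type="np",
).images[0]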
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : def __init__( self : List[Any] , A_ : Optional[Any] , A_ : str=1_3 , A_ : Optional[Any]=3_2 , A_ : Tuple=3 , A_ : Optional[int]=4 , A_ : List[Any]=[1_0, 2_0, 3_0, 4_0] , A_ : List[str]=[2, 2, 3, 2] , A_ : str=True , A_ : Optional[Any]=True , A_ : str=3_7 , A_ : Any="gelu" , A_ : Tuple=1_0 , A_ : Optional[int]=0.02 , A_ : int=["stage2", "stage3", "stage4"] , A_ : Optional[Any]=[2, 3, 4] , A_ : Any=None , ): '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = num_channels __lowercase = num_stages __lowercase = hidden_sizes __lowercase = depths __lowercase = is_training __lowercase = use_labels __lowercase = intermediate_size __lowercase = hidden_act __lowercase = num_labels __lowercase = initializer_range __lowercase = out_features __lowercase = out_indices __lowercase = scope def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : List[Any] , A_ : int , A_ : int ): '''simple docstring''' __lowercase = ConvNextModel(config=A_ ) model.to(A_ ) model.eval() __lowercase = model(A_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : List[Any] , A_ : Tuple , A_ : List[str] ): '''simple docstring''' __lowercase = ConvNextForImageClassification(A_ ) model.to(A_ ) model.eval() __lowercase = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : Optional[int] , A_ : List[str] , A_ : Union[str, Any] ): '''simple docstring''' __lowercase = ConvNextBackbone(config=A_ ) model.to(A_ ) model.eval() __lowercase = model(A_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) 
, [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __lowercase = None __lowercase = ConvNextBackbone(config=A_ ) model.to(A_ ) model.eval() __lowercase = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( _a , _a , unittest.TestCase ): a : Optional[Any] = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) a : str = ( {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification} if is_torch_available() else {} ) a : str = True a : List[str] = False a : str = False a : Optional[int] = False a : str = False def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' __lowercase = ConvNextModelTester(self ) __lowercase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(A_ ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A_ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' def check_hidden_states_output(A_ : 
Union[str, Any] , A_ : Optional[int] , A_ : int ): __lowercase = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): __lowercase = model(**self._prepare_for_class(A_ , A_ ) ) __lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase = self.model_tester.num_stages self.assertEqual(len(A_ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(A_ , A_ , A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(A_ , A_ , A_ ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = ConvNextModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def lowerCAmelCase_ ( ): """simple docstring""" __lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' __lowercase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(A_ ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=A_ , return_tensors="""pt""" ).to(A_ ) # forward pass with torch.no_grad(): __lowercase = model(**A_ ) # verify the logits __lowercase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , A_ ) __lowercase = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) ) @require_torch class lowerCamelCase__ ( unittest.TestCase , _a ): a : List[Any] = (ConvNextBackbone,) if is_torch_available() else () a : Dict = ConvNextConfig a : List[str] = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' __lowercase = ConvNextModelTester(self )
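The backbone checks above translate directly into everyday usage: out_features selects which stages ConvNextBackbone exposes, and the forward pass returns one feature map per requested stage. A minimal sketch with a randomly initialized config (the input size is illustrative):

# Minimal sketch: ConvNextBackbone yields one feature map per requested stage.
# Weights here are randomly initialized; the input size is illustrative.
import torch
from transformers import ConvNextBackbone, ConvNextConfig

config = ConvNextConfig(out_features=["stage2", "stage3", "stage4"])
backbone = ConvNextBackbone(config)
pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
for name, fmap in zip(config.out_features, outputs.feature_maps):
    print(name, tuple(fmap.shape))  # channels grow, spatial size halves per stage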
"""simple docstring""" def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ): """simple docstring""" def get_matched_characters(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> str: __lowercase = [] __lowercase = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): __lowercase = int(max(0 , i - limit ) ) __lowercase = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(UpperCamelCase__ ) __lowercase = f'''{_stra[0:_stra.index(UpperCamelCase__ )]} {_stra[_stra.index(UpperCamelCase__ ) + 1:]}''' return "".join(UpperCamelCase__ ) # matching characters __lowercase = get_matched_characters(UpperCamelCase__ , UpperCamelCase__ ) __lowercase = get_matched_characters(UpperCamelCase__ , UpperCamelCase__ ) __lowercase = len(UpperCamelCase__ ) # transposition __lowercase = ( len([(ca, ca) for ca, ca in zip(UpperCamelCase__ , UpperCamelCase__ ) if ca != ca] ) // 2 ) if not match_count: __lowercase = 0.0 else: __lowercase = ( 1 / 3 * ( match_count / len(UpperCamelCase__ ) + match_count / len(UpperCamelCase__ ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters __lowercase = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("hello", "world"))
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """0/1 knapsack: best total value using items[index:] within max_weight."""
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
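Each call branches into "skip the item" and "take the item", so the plain recursion is O(2^n); memoizing on (index, remaining capacity) would bring it down to O(n * W). A quick usage check with arbitrary sample data:

# Usage check for the recursive knapsack above (arbitrary sample data).
weights = [1, 3, 4, 5]
values = [1, 4, 5, 7]
best = knapsack(weights, values, len(weights), 7, 0)
assert best == 9  # take the items weighing 3 and 4 (values 4 + 5)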
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself
    (e.g. 5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare the trailing digits of the number and its square.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
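A few spot checks: 5^2 = 25 and 76^2 = 5776 both end in the original number, while 7^2 = 49 does not:

# Spot checks for the automorphic-number predicate above.
assert is_automorphic_number(5)  # 5^2 = 25 ends in 5
assert is_automorphic_number(76)  # 76^2 = 5776 ends in 76
assert not is_automorphic_number(7)  # 7^2 = 49 does not end in 7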
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __snake_case : Optional[Any] = 'pt' elif is_tf_available(): __snake_case : List[Any] = 'tf' else: __snake_case : Tuple = 'jax' class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = PerceiverTokenizer __lowercase : str = False def __A ( self ) -> List[str]: super().setUp() A_ = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __A ( self ) -> Optional[int]: return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def __A ( self , **_SCREAMING_SNAKE_CASE ) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. A_ = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): try: A_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) except UnicodeDecodeError: pass toks.append((i, tok) ) A_ = list(filter(lambda _SCREAMING_SNAKE_CASE : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _SCREAMING_SNAKE_CASE ) ) A_ = list(filter(lambda _SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) if max_length is not None and len(_SCREAMING_SNAKE_CASE ) > max_length: A_ = toks[:max_length] if min_length is not None and len(_SCREAMING_SNAKE_CASE ) < min_length and len(_SCREAMING_SNAKE_CASE ) > 0: while len(_SCREAMING_SNAKE_CASE ) < min_length: A_ = toks + toks # toks_str = [t[1] for t in toks] A_ = [t[0] for t in toks] # Ensure consistency A_ = tokenizer.decode(_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) if " " not in output_txt and len(_SCREAMING_SNAKE_CASE ) > 1: A_ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) ) if with_prefix_space: A_ = ''' ''' + output_txt A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) return output_txt, output_ids def __A ( self ) -> int: A_ = self.perceiver_tokenizer A_ = '''Unicode €.''' A_ = tokenizer(_SCREAMING_SNAKE_CASE ) A_ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , _SCREAMING_SNAKE_CASE ) # decoding A_ = tokenizer.decode(_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , '''[CLS]Unicode €.[SEP]''' ) A_ = tokenizer('''e è é ê ë''' ) A_ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , _SCREAMING_SNAKE_CASE ) # decoding A_ = tokenizer.decode(_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, 
        # but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def __A(self) -> str:
        A_ = self.perceiver_tokenizer
        A_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        A_ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        A_ = tokenizer(_SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE, return_tensors=_SCREAMING_SNAKE_CASE)
        self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

        if FRAMEWORK != "jax":
            A_ = list(batch.input_ids.numpy()[0])
        else:
            A_ = list(batch.input_ids.tolist()[0])

        self.assertListEqual(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def __A(self) -> Any:
        A_ = self.perceiver_tokenizer
        A_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        A_ = tokenizer(_SCREAMING_SNAKE_CASE, padding=_SCREAMING_SNAKE_CASE, return_tensors=_SCREAMING_SNAKE_CASE)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", _SCREAMING_SNAKE_CASE)
        self.assertIn("attention_mask", _SCREAMING_SNAKE_CASE)
        self.assertNotIn("decoder_input_ids", _SCREAMING_SNAKE_CASE)
        self.assertNotIn("decoder_attention_mask", _SCREAMING_SNAKE_CASE)

    def __A(self) -> int:
        A_ = self.perceiver_tokenizer
        A_ = [
            "Summary of the text.",
            "Another summary.",
        ]
        A_ = tokenizer(
            text_target=_SCREAMING_SNAKE_CASE,
            max_length=32,
            padding="max_length",
            truncation=_SCREAMING_SNAKE_CASE,
            return_tensors=_SCREAMING_SNAKE_CASE,
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def __A(self) -> Tuple:
        # safety check on max_len default value so we are sure the test works
        A_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        A_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                A_ = tempfile.mkdtemp()
                A_ = " He is very happy, UNwant\u00E9d,running"
                A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE, add_special_tokens=_SCREAMING_SNAKE_CASE)
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
                A_ = tokenizer.__class__.from_pretrained(_SCREAMING_SNAKE_CASE)
                A_ = after_tokenizer.encode(_SCREAMING_SNAKE_CASE, add_special_tokens=_SCREAMING_SNAKE_CASE)
                self.assertListEqual(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                shutil.rmtree(_SCREAMING_SNAKE_CASE)

        A_ = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                A_ = tempfile.mkdtemp()
                A_ = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                A_ = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                A_ = tokenizer.encode(_SCREAMING_SNAKE_CASE, add_special_tokens=_SCREAMING_SNAKE_CASE)
                tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
                A_ = tokenizer.__class__.from_pretrained(_SCREAMING_SNAKE_CASE)
                A_ = after_tokenizer.encode(_SCREAMING_SNAKE_CASE, add_special_tokens=_SCREAMING_SNAKE_CASE)
                self.assertListEqual(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                A_ = tokenizer.__class__.from_pretrained(_SCREAMING_SNAKE_CASE, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(_SCREAMING_SNAKE_CASE)

    def __A(self) -> Optional[Any]:
        A_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_SCREAMING_SNAKE_CASE)

                with open(os.path.join(_SCREAMING_SNAKE_CASE, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    A_ = json.load(_SCREAMING_SNAKE_CASE)
                with open(os.path.join(_SCREAMING_SNAKE_CASE, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    A_ = json.load(_SCREAMING_SNAKE_CASE)

                A_ = [f"<extra_id_{i}>" for i in range(125)]
                A_ = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                A_ = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(_SCREAMING_SNAKE_CASE, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
                with open(os.path.join(_SCREAMING_SNAKE_CASE, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                A_ = tokenizer_class.from_pretrained(
                    _SCREAMING_SNAKE_CASE,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                A_ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_SCREAMING_SNAKE_CASE)]
                A_ = tokenizer_class.from_pretrained(
                    _SCREAMING_SNAKE_CASE,
                    additional_special_tokens=_SCREAMING_SNAKE_CASE,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def __A(self) -> Tuple:
        A_ = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    def __A(self) -> str:
        pass

    def __A(self) -> Union[str, Any]:
        pass

    def __A(self) -> Optional[Any]:
        pass

    def __A(self) -> List[str]:
        pass

    def __A(self) -> List[Any]:
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        A_ = self.get_tokenizers(fast=_SCREAMING_SNAKE_CASE, do_lower_case=_SCREAMING_SNAKE_CASE)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                A_ = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                A_ = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE)
                self.assertIsInstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
174
0
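# The assertions above hinge on a byte-level scheme: token ids are UTF-8 bytes
# shifted past a small special-token table, so decode(encode(s)) is lossless.
# A minimal stand-alone sketch of that idea (the six-token table, the offset,
# and all names below are my own assumptions, not the tested implementation):
SPECIAL_TOKENS = ["[PAD]", "[BOS]", "[EOS]", "[MASK]", "[CLS]", "[SEP]"]
OFFSET = len(SPECIAL_TOKENS)


def encode(text: str) -> list[int]:
    # [CLS] ... [SEP], with every UTF-8 byte shifted past the special-token ids
    return [4] + [b + OFFSET for b in text.encode("utf-8")] + [5]


def decode(ids: list[int]) -> str:
    out = []
    for i in ids:
        if i < OFFSET:
            out.append(SPECIAL_TOKENS[i].encode("utf-8"))
        else:
            out.append(bytes([i - OFFSET]))
    # invalid byte sequences render as the replacement character, as in the test above
    return b"".join(out).decode("utf-8", errors="replace")


assert decode(encode("e è é ê ë")) == "[CLS]e è é ê ë[SEP]"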
'''simple docstring'''

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""


@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
320
'''simple docstring'''

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
320
1
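# The RAG config above follows a composite pattern: the parent config is built
# from two sub-config dicts and re-serializes them on to_dict(). A hypothetical
# stripped-down illustration of just that mechanic (all class and field names
# below are invented for the sketch, not the transformers API):
import copy


class SubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfig:
    def __init__(self, question_encoder: dict, generator: dict, **kwargs):
        self.question_encoder = SubConfig(**question_encoder)
        self.generator = SubConfig(**generator)

    @classmethod
    def from_sub_configs(cls, qe: SubConfig, gen: SubConfig, **kwargs):
        # sub-configs travel as plain dicts, exactly once, through __init__
        return cls(question_encoder=qe.to_dict(), generator=gen.to_dict(), **kwargs)

    def to_dict(self):
        out = copy.deepcopy(self.__dict__)
        out["question_encoder"] = self.question_encoder.to_dict()
        out["generator"] = self.generator.to_dict()
        return out


cfg = CompositeConfig.from_sub_configs(SubConfig(hidden=4), SubConfig(hidden=8))
assert cfg.to_dict()["generator"]["hidden"] == 8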
'''simple docstring'''

import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)

lowerCamelCase__ = "Hello world! cécé herlolip"


def _SCREAMING_SNAKE_CASE(snake_case_: str, snake_case_: str, snake_case_: bool) -> Union[str, Any]:
    '''simple docstring'''
    _lowercase: List[Any] = FairseqRobertaModel.from_pretrained(snake_case_)
    roberta.eval()  # disable dropout
    _lowercase: List[Any] = roberta.model.encoder.sentence_encoder
    _lowercase: Optional[Any] = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        _lowercase: List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", snake_case_)

    _lowercase: Union[str, Any] = XLMRobertaXLForSequenceClassification(snake_case_) if classification_head else XLMRobertaXLForMaskedLM(snake_case_)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    _lowercase: Union[str, Any] = roberta_sent_encoder.embed_tokens.weight
    _lowercase: List[Any] = roberta_sent_encoder.embed_positions.weight
    _lowercase: Any = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    _lowercase: List[Any] = roberta_sent_encoder.layer_norm.weight
    _lowercase: List[Any] = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        _lowercase: BertLayer = model.roberta.encoder.layer[i]
        _lowercase: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        _lowercase: RobertaAttention = layer.attention
        _lowercase: Union[str, Any] = roberta_layer.self_attn_layer_norm.weight
        _lowercase: str = roberta_layer.self_attn_layer_norm.bias

        # self attention
        _lowercase: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        _lowercase: Any = roberta_layer.self_attn.q_proj.weight
        _lowercase: Optional[int] = roberta_layer.self_attn.q_proj.bias
        _lowercase: Tuple = roberta_layer.self_attn.k_proj.weight
        _lowercase: Union[str, Any] = roberta_layer.self_attn.k_proj.bias
        _lowercase: int = roberta_layer.self_attn.v_proj.weight
        _lowercase: Union[str, Any] = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        _lowercase: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        _lowercase: str = roberta_layer.self_attn.out_proj.weight
        _lowercase: Union[str, Any] = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        _lowercase: Dict = roberta_layer.final_layer_norm.weight
        _lowercase: int = roberta_layer.final_layer_norm.bias

        # intermediate
        _lowercase: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowercase: str = roberta_layer.fca.weight
        _lowercase: Tuple = roberta_layer.fca.bias

        # output
        _lowercase: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowercase: Optional[Any] = roberta_layer.fca.weight
        _lowercase: Optional[Any] = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        _lowercase: Dict = roberta.model.classification_heads["mnli"].dense.weight
        _lowercase: Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
        _lowercase: Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
        _lowercase: str = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        _lowercase: Optional[Any] = roberta.model.encoder.lm_head.dense.weight
        _lowercase: Optional[Any] = roberta.model.encoder.lm_head.dense.bias
        _lowercase: Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
        _lowercase: int = roberta.model.encoder.lm_head.layer_norm.bias
        _lowercase: Dict = roberta.model.encoder.lm_head.weight
        _lowercase: Tuple = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    _lowercase: torch.Tensor = roberta.encode(snake_case_).unsqueeze(0)  # batch of size 1

    _lowercase: Optional[Any] = model(snake_case_)[0]
    if classification_head:
        _lowercase: Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(snake_case_))
    else:
        _lowercase: Tuple = roberta.model(snake_case_)[0]
    print(our_output.shape, their_output.shape)
    _lowercase: Dict = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    _lowercase: Tuple = torch.allclose(snake_case_, snake_case_, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(snake_case_).mkdir(parents=snake_case_, exist_ok=snake_case_)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(snake_case_)


if __name__ == "__main__":
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    lowerCamelCase__ = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
720
'''simple docstring'''

import unittest

from transformers import DonutProcessor


lowerCamelCase__ = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self) -> None:
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def test_token2json(self) -> None:
        '''simple docstring'''
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
411
0
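# The processor test above exercises a tag-markup-to-JSON conversion. A
# simplified, hypothetical re-implementation of that idea (not the real
# DonutProcessor.token2json, which handles many more cases):
import re


def token2json_sketch(seq: str):
    out = {}
    # non-greedy match of <s_key>...</s_key> pairs, backreference keeps tags paired
    for key, body in re.findall(r"<s_(\w+)>(.*?)</s_\1>", seq, flags=re.DOTALL):
        if "<s_" in body:
            # nested tags: recurse on each <sep/>-separated chunk
            parts = [token2json_sketch(p) for p in body.split("<sep/>")]
            out[key] = parts if len(parts) > 1 else parts[0]
        else:
            out[key] = body
    return out


seq = (
    "<s_name>John Doe</s_name>"
    "<s_nicknames><s_nickname>Johnny</s_nickname><sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
assert token2json_sketch(seq) == {
    "name": "John Doe",
    "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}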
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 100_0000, n_limit: int = 10):
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
20
'''simple docstring'''

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ = logging.get_logger(__name__)

a_ = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
0
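# The laminae counting above can be cross-checked by brute force for small
# limits, enumerating every square lamina directly (helper below is my own
# verification sketch, usable only for modest t_limit):
from collections import defaultdict


def brute_force(t_limit: int = 1000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        # hole must share parity with outer and leave a border at least 1 tile thick
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tiles only grow as the hole shrinks
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


print(brute_force(1000))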
"""simple docstring""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) __lowerCAmelCase : int = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class _lowerCAmelCase ( snake_case__ ): """simple docstring""" _lowerCamelCase = '''segformer''' def __init__( self , _lowercase=3 , _lowercase=4 , _lowercase=[2, 2, 2, 2] , _lowercase=[8, 4, 2, 1] , _lowercase=[3_2, 6_4, 1_6_0, 2_5_6] , _lowercase=[7, 3, 3, 3] , _lowercase=[4, 2, 2, 2] , _lowercase=[1, 2, 5, 8] , _lowercase=[4, 4, 4, 4] , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=0.1 , _lowercase=1E-6 , _lowercase=2_5_6 , _lowercase=2_5_5 , **_lowercase , ) -> List[Any]: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , _SCREAMING_SNAKE_CASE , ) snake_case_ : Optional[Any] = num_channels snake_case_ : List[str] = num_encoder_blocks snake_case_ : str = depths snake_case_ : Optional[Any] = sr_ratios snake_case_ : Dict = hidden_sizes snake_case_ : Tuple = patch_sizes snake_case_ : Optional[Any] = strides snake_case_ : List[Any] = mlp_ratios snake_case_ : Dict = num_attention_heads snake_case_ : Union[str, Any] = hidden_act snake_case_ : Dict = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : Optional[int] = classifier_dropout_prob snake_case_ : int = initializer_range snake_case_ : str = drop_path_rate snake_case_ : Optional[int] = layer_norm_eps snake_case_ : str = decoder_hidden_size snake_case_ : Any = kwargs.get("""reshape_last_stage""" , _SCREAMING_SNAKE_CASE ) snake_case_ : str = semantic_loss_ignore_index class _lowerCAmelCase ( snake_case__ ): """simple docstring""" _lowerCamelCase = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase__ ( self ) -> float: '''simple docstring''' return 1E-4 @property def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' return 1_2
721
"""simple docstring""" def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b" snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b" snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
21
0
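# A quick property check for the XOR helper above: parsing the returned string
# back as binary must agree with Python's built-in ^ operator. The harness is
# my own; pass the function defined above as `fn`, e.g. check_binary_xor(fn).
import random


def check_binary_xor(fn, trials: int = 1000) -> None:
    for _ in range(trials):
        a = random.randrange(0, 1 << 16)
        b = random.randrange(0, 1 << 16)
        # int(..., 2) accepts the "0b" prefix when a base is given
        assert int(fn(a, b), 2) == a ^ b, (a, b)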
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase = logging.get_logger(__name__)

UpperCAmelCase = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ) -> Dict:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
563
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
563
1
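# The get_dummy_inputs helper above branches on the device when seeding:
# MPS wants the global CPU-seeded generator, other devices take a device-local
# torch.Generator. A small stand-alone sketch of that pattern (function name
# is my own):
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global CPU generator, returned by torch
    gen = torch.Generator(device=device)
    return gen.manual_seed(seed)  # manual_seed returns the generator itself


g = make_generator("cpu", seed=0)
print(torch.randn(2, generator=g))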
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
664
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)

UpperCAmelCase_ = "Hello world! cécé herlolip"


def UpperCAmelCase__(_SCREAMING_SNAKE_CASE: str, _SCREAMING_SNAKE_CASE: str, _SCREAMING_SNAKE_CASE: bool) -> List[Any]:
    _lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE)
    roberta.eval()  # disable dropout
    _lowerCAmelCase = roberta.model.encoder.sentence_encoder
    _lowerCAmelCase = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        _lowerCAmelCase = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", _SCREAMING_SNAKE_CASE)

    _lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    _lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
    _lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
    _lowerCAmelCase = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    _lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
    _lowerCAmelCase = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        _lowerCAmelCase = model.roberta.encoder.layer[i]
        _lowerCAmelCase = roberta_sent_encoder.layers[i]

        _lowerCAmelCase = layer.attention
        _lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
        _lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias

        # self attention
        _lowerCAmelCase = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        _lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
        _lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
        _lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        _lowerCAmelCase = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        _lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
        _lowerCAmelCase = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        _lowerCAmelCase = roberta_layer.final_layer_norm.weight
        _lowerCAmelCase = roberta_layer.final_layer_norm.bias

        # intermediate
        _lowerCAmelCase = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowerCAmelCase = roberta_layer.fca.weight
        _lowerCAmelCase = roberta_layer.fca.bias

        # output
        _lowerCAmelCase = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowerCAmelCase = roberta_layer.fca.weight
        _lowerCAmelCase = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        _lowerCAmelCase = roberta.model.classification_heads["mnli"].dense.weight
        _lowerCAmelCase = roberta.model.classification_heads["mnli"].dense.bias
        _lowerCAmelCase = roberta.model.classification_heads["mnli"].out_proj.weight
        _lowerCAmelCase = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        _lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
        _lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
        _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
        _lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
        _lowerCAmelCase = roberta.model.encoder.lm_head.weight
        _lowerCAmelCase = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    _lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE).unsqueeze(0)  # batch of size 1

    _lowerCAmelCase = model(_SCREAMING_SNAKE_CASE)[0]
    if classification_head:
        _lowerCAmelCase = roberta.model.classification_heads["mnli"](roberta.extract_features(_SCREAMING_SNAKE_CASE))
    else:
        _lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE)[0]
    print(our_output.shape, their_output.shape)
    _lowerCAmelCase = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    _lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(_SCREAMING_SNAKE_CASE).mkdir(parents=_SCREAMING_SNAKE_CASE, exist_ok=_SCREAMING_SNAKE_CASE)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(_SCREAMING_SNAKE_CASE)


if __name__ == "__main__":
    UpperCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    UpperCAmelCase_ = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
664
1
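# The core of the annealing routine above is the Metropolis accept/reject rule.
# A self-contained 1-D miniature of just that rule, with no SearchProblem
# dependency (all names, the step size, and the schedule are my own choices):
import math
import random


def anneal_1d(f, x0: float, t0: float = 100.0, rate: float = 0.01,
              t_min: float = 1.0, step: float = 0.5) -> float:
    x, t = x0, t0
    while t > t_min:
        cand = x + random.uniform(-step, step)
        change = f(x) - f(cand)  # > 0 means the candidate is lower (better for minimizing)
        if change > 0 or random.random() < math.e ** (change / t):
            x = cand  # accept improvements always, worsenings with Boltzmann probability
        t -= t * rate  # geometric cooling
    return x


random.seed(0)
print(anneal_1d(lambda x: (x - 3) ** 2, x0=12.0))  # drifts toward the minimum at 3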
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


__a = 16
__a = 32


def snake_case_(SCREAMING_SNAKE_CASE_) -> str:
    return int(x / 2**20)


class UpperCAmelCase:
    """simple docstring"""

    def __enter__(self) -> Dict:
        """simple docstring"""
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        lowercase__: Union[str, Any] = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *lowerCamelCase) -> int:
        """simple docstring"""
        gc.collect()
        torch.cuda.empty_cache()
        lowercase__: List[str] = torch.cuda.memory_allocated()
        lowercase__: List[str] = torch.cuda.max_memory_allocated()
        lowercase__: Any = bamb(self.end - self.begin)
        lowercase__: Any = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def snake_case_(
    SCREAMING_SNAKE_CASE_,
    SCREAMING_SNAKE_CASE_=16,
    SCREAMING_SNAKE_CASE_="bert-base-cased",
    SCREAMING_SNAKE_CASE_=320,
    SCREAMING_SNAKE_CASE_=160,
) -> Union[str, Any]:
    lowercase__: str = AutoTokenizer.from_pretrained(snake_case__)
    lowercase__: Dict = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(SCREAMING_SNAKE_CASE_):
        # max_length=None => use the model max length (it's actually the default)
        lowercase__: Optional[int] = tokenizer(examples["sentence1"], examples["sentence2"], truncation=snake_case__, max_length=snake_case__)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    lowercase__: Optional[Any] = datasets.map(
        snake_case__, batched=snake_case__, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=snake_case__
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    lowercase__: Dict = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(SCREAMING_SNAKE_CASE_):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(snake_case__, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(snake_case__, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    lowercase__: Dict = DataLoader(
        tokenized_datasets["train"], shuffle=snake_case__, collate_fn=snake_case__, batch_size=snake_case__
    )
    lowercase__: str = DataLoader(
        tokenized_datasets["validation"], shuffle=snake_case__, collate_fn=snake_case__, batch_size=snake_case__
    )

    return train_dataloader, eval_dataloader


def snake_case_(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_) -> int:
    lowercase__: Optional[int] = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowercase__: Optional[Any] = config["lr"]
    lowercase__: int = int(config["num_epochs"])
    lowercase__: List[str] = int(config["seed"])
    lowercase__: int = int(config["batch_size"])
    lowercase__: Optional[int] = args.model_name_or_path

    set_seed(snake_case__)
    lowercase__: Union[str, Any] = get_dataloaders(snake_case__, snake_case__, snake_case__, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowercase__: Tuple = AutoModelForSequenceClassification.from_pretrained(snake_case__, return_dict=snake_case__)

    # Instantiate optimizer
    lowercase__: List[str] = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    lowercase__: Any = optimizer_cls(params=model.parameters(), lr=snake_case__)

    if accelerator.state.deepspeed_plugin is not None:
        lowercase__: Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        lowercase__: Optional[int] = 1
    lowercase__: Dict = (len(snake_case__) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lowercase__: Union[str, Any] = get_linear_schedule_with_warmup(
            optimizer=snake_case__,
            num_warmup_steps=0,
            num_training_steps=snake_case__,
        )
    else:
        lowercase__: Any = DummyScheduler(snake_case__, total_num_steps=snake_case__, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    lowercase__: str = accelerator.prepare(
        snake_case__, snake_case__, snake_case__, snake_case__, snake_case__
    )

    # We need to keep track of how many total steps we have iterated over
    lowercase__: Dict = 0
    # We also need to keep track of the stating epoch so files are named properly
    lowercase__: Any = 0

    # Now we train the model
    lowercase__: Optional[int] = {}
    for epoch in range(snake_case__, snake_case__):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(snake_case__):
                lowercase__: Optional[int] = model(**snake_case__)
                lowercase__: Optional[Any] = outputs.loss
                lowercase__: str = loss / gradient_accumulation_steps
                accelerator.backward(snake_case__)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        lowercase__: Optional[Any] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(snake_case__, snake_case__)


def snake_case_() -> List[str]:
    lowercase__: List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=snake_case__,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=snake_case__,
    )
    parser.add_argument(
        "--output_dir",
        type=snake_case__,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=snake_case__,
        default=snake_case__,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=snake_case__,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=snake_case__,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=snake_case__,
        default=1,
        help="Number of train epochs.",
    )
    lowercase__: Tuple = parser.parse_args()
    lowercase__: Dict = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(snake_case__, snake_case__)


if __name__ == "__main__":
    main()
397
def decimal_isolate(number: float, digit_amount: int):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
455
0
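# A CPU-only analogue of the TorchTracemalloc context manager in the script
# above, using the stdlib tracemalloc instead of CUDA counters; the b2mb helper
# mirrors the bytes-to-MiB conversion there (class and names are my own sketch):
import tracemalloc


def b2mb(x: int) -> int:
    return int(x / 2**20)


class TraceMalloc:
    def __enter__(self):
        tracemalloc.start()
        self.begin = tracemalloc.get_traced_memory()[0]
        return self

    def __exit__(self, *exc):
        current, peak = tracemalloc.get_traced_memory()  # (current, peak) in bytes
        tracemalloc.stop()
        self.used = b2mb(current - self.begin)
        self.peaked = b2mb(peak - self.begin)


with TraceMalloc() as tm:
    buf = bytearray(8 * 2**20)  # allocate ~8 MiB inside the tracked region
print(tm.used, tm.peaked)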
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)

lowerCAmelCase__ = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
710
import json
import os
import tempfile
from unittest.mock import patch

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment


def __lowerCamelCase() -> Tuple:
    _lowercase = torch.nn.Linear(2, 4)
    _lowercase = torch.optim.AdamW(model.parameters(), lr=1.0)
    _lowercase = torch.optim.lr_scheduler.OneCycleLR(__a, max_lr=0.01, steps_per_epoch=2, epochs=1)
    _lowercase = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    _lowercase = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def __lowerCamelCase(__a: Tuple) -> Optional[int]:
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def __lowerCamelCase(__a: str) -> Tuple:
    _lowercase = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(__a)


class _a(lowerCamelCase_):
    """simple docstring"""

    @require_cuda
    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(lowerCAmelCase_):
            _lowercase = Accelerator(cpu=lowerCAmelCase_)

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase = GradientState()
        assert state.num_steps == 1
        _lowercase = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        _lowercase = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = create_components()
        (
            (_lowercase),
            (_lowercase),
            (_lowercase),
            (_lowercase),
            (_lowercase),
        ) = accelerator.prepare(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = create_components()
        accelerator.prepare(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def __lowerCAmelCase(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*lowerCAmelCase_, **lowerCAmelCase_):
            pass

        with patch("torch.cuda.set_device", lowerCAmelCase_), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            _lowercase = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = create_components()
        accelerator.prepare(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        _lowercase = get_signature(lowerCAmelCase_)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowerCAmelCase_)

            # make sure random weights don't match
            load_random_weights(lowerCAmelCase_)
            self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(lowerCAmelCase_)
            self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_)) < 1e-3)

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = create_components()
        accelerator.prepare(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        _lowercase = get_signature(lowerCAmelCase_)

        # saving hook
        def save_config(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_):
            _lowercase = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(lowerCAmelCase_, "data.json"), "w") as f:
                json.dump(lowerCAmelCase_, lowerCAmelCase_)

        # loading hook
        def load_config(lowerCAmelCase_, lowerCAmelCase_):
            with open(os.path.join(lowerCAmelCase_, "data.json"), "r") as f:
                _lowercase = json.load(lowerCAmelCase_)

            _lowercase = config["class_name"]

        _lowercase = accelerator.register_save_state_pre_hook(lowerCAmelCase_)
        _lowercase = accelerator.register_load_state_pre_hook(lowerCAmelCase_)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowerCAmelCase_)

            # make sure random weights don't match with hooks
            load_random_weights(lowerCAmelCase_)
            self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_)) > 1e-3)

            # random class name to verify correct one is loaded
            _lowercase = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(lowerCAmelCase_)
            self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(lowerCAmelCase_)

            # make sure random weights don't match with hooks removed
            load_random_weights(lowerCAmelCase_)
            self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_)) > 1e-3)

            # random class name to verify correct one is loaded
            _lowercase = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(lowerCAmelCase_)
            self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase_)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = create_components()
        _lowercase = None

        # This should work
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = accelerator.prepare(
            lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_
        )
        self.assertTrue(dummy_obj is None)

    def __lowerCAmelCase(self):
        _lowercase = Accelerator()
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = create_components()
        _lowercase = [1, 2, 3]

        # This should work
        _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase = accelerator.prepare(
            lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_
        )
        self.assertEqual(
            getattr(lowerCAmelCase_, "_is_accelerate_prepared", lowerCAmelCase_),
            lowerCAmelCase_,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(lowerCAmelCase_, "_is_accelerate_prepared", lowerCAmelCase_),
            lowerCAmelCase_,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(lowerCAmelCase_, "_is_accelerate_prepared", lowerCAmelCase_),
            lowerCAmelCase_,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(lowerCAmelCase_, "_is_accelerate_prepared", lowerCAmelCase_),
            lowerCAmelCase_,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(lowerCAmelCase_, "_is_accelerate_prepared", lowerCAmelCase_),
            lowerCAmelCase_,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(lowerCAmelCase_, "_is_accelerate_prepared", lowerCAmelCase_),
            lowerCAmelCase_,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def __lowerCAmelCase(self):
        from transformers import AutoModelForCausalLM

        _lowercase = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_abit=lowerCAmelCase_,
            device_map={"": 0},
        )
        _lowercase = Accelerator()

        # This should work
        _lowercase = accelerator.prepare(lowerCAmelCase_)

    @slow
    @require_bnb
    def __lowerCAmelCase(self):
        from transformers import AutoModelForCausalLM

        _lowercase = Accelerator()
        with init_empty_weights():
            _lowercase = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
            model.tie_weights()

        _lowercase = infer_auto_device_map(lowerCAmelCase_)
        _lowercase = "cpu"
        _lowercase = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=lowerCAmelCase_,
            load_in_abit=lowerCAmelCase_,
            llm_inta_enable_fpaa_cpu_offload=lowerCAmelCase_,
        )

        # This should not work and get value error
        with self.assertRaises(lowerCAmelCase_):
            _lowercase = accelerator.prepare(lowerCAmelCase_)

    @slow
    @require_bnb
    @require_multi_gpu
    def __lowerCAmelCase(self):
        from transformers import AutoModelForCausalLM

        _lowercase = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            _lowercase = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
            model.tie_weights()

        _lowercase = infer_auto_device_map(lowerCAmelCase_)
        _lowercase = 1
        _lowercase = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_abit=lowerCAmelCase_,
            device_map=lowerCAmelCase_,
        )
        _lowercase = Accelerator()

        # This should not work and get value error
        with self.assertRaises(lowerCAmelCase_):
            _lowercase = accelerator.prepare(lowerCAmelCase_)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def __lowerCAmelCase(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            _lowercase = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )

        _lowercase = infer_auto_device_map(lowerCAmelCase_)
        _lowercase = 1
        _lowercase = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_abit=lowerCAmelCase_,
            device_map=lowerCAmelCase_,
        )
        _lowercase = Accelerator()

        # This should work
        _lowercase = accelerator.prepare(lowerCAmelCase_)

    @require_cuda
    def __lowerCAmelCase(self):
        _lowercase = torch.nn.Linear(10, 10)
        _lowercase = torch.optim.SGD(model.parameters(), lr=0.01)
        _lowercase = Accelerator(cpu=lowerCAmelCase_)
        _lowercase = accelerator.prepare(lowerCAmelCase_)
594
0
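# --- Illustrative sketch (not part of the dataset row above) ---
# A minimal usage example for the BeiT configuration class in the `code`
# field above. It assumes the `transformers` package is installed; the
# argument values shown are hypothetical and only demonstrate the API.
from transformers import BeitConfig, BeitModel

config = BeitConfig(hidden_size=768, num_hidden_layers=12)  # defaults spelled out
model = BeitModel(config)  # randomly initialized model built from the config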
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=__lowerCamelCase ) class __lowercase ( __lowerCamelCase ): snake_case_ = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) snake_case_ = Features({"""image""": Image()} ) snake_case_ = Features({"""labels""": ClassLabel} ) snake_case_ = "image" snake_case_ = "labels" def __lowercase ( self : Dict ,A : str ): '''simple docstring''' if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features." ) if not isinstance(features[self.label_column] ,A ): raise ValueError(f"Column {self.label_column} is not a ClassLabel." ) UpperCAmelCase__ : List[str] = copy.deepcopy(self ) UpperCAmelCase__ : Optional[Any] = self.label_schema.copy() UpperCAmelCase__ : List[Any] = features[self.label_column] UpperCAmelCase__ : List[Any] = label_schema return task_template @property def __lowercase ( self : int ): '''simple docstring''' return { self.image_column: "image", self.label_column: "labels", }
65
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
692
0
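# --- Illustrative sketch (not part of the dataset rows) ---
# How the ImageClassification task template above aligns its label schema
# with a concrete dataset's features. Assumes the `datasets` package is
# installed; the class names "cat"/"dog" are hypothetical example values.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)  # label schema now carries the class names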
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def _a ( lowercase__ : str ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def _a ( lowercase__ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = np.max(_outputs , axis=-1 , keepdims=lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase__ ) class snake_case ( UpperCamelCase_ ): lowercase_ = 'sigmoid' lowercase_ = 'softmax' lowercase_ = 'none' @add_end_docstrings( UpperCamelCase_ , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , ) class snake_case ( UpperCamelCase_ ): lowercase_ = False lowercase_ = ClassificationFunction.NONE def __init__( self : Tuple , **a_ : Dict )-> List[str]: """simple docstring""" super().__init__(**a_ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowercase( self : Union[str, Any] , a_ : str=None , a_ : Any=None , a_ : Optional[Any]="" , **a_ : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_kwargs SCREAMING_SNAKE_CASE__ : Tuple = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: SCREAMING_SNAKE_CASE__ : Dict = self.model.config.return_all_scores if isinstance(a_ , a_ ) or top_k is None: SCREAMING_SNAKE_CASE__ : int = top_k SCREAMING_SNAKE_CASE__ : List[str] = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a_ , ) if return_all_scores: SCREAMING_SNAKE_CASE__ : Tuple = None else: SCREAMING_SNAKE_CASE__ : Optional[int] = 1 if isinstance(a_ , a_ ): SCREAMING_SNAKE_CASE__ : int = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : List[str] , *a_ : List[Any] , **a_ : List[str] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(*a_ , **a_ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
SCREAMING_SNAKE_CASE__ : int = 'top_k' not in kwargs if isinstance(args[0] , a_ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowercase( self : str , a_ : Optional[Any] , **a_ : List[Any] )-> Dict[str, GenericTensor]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.framework if isinstance(a_ , a_ ): return self.tokenizer(**a_ , return_tensors=a_ , **a_ ) elif isinstance(a_ , a_ ) and len(a_ ) == 1 and isinstance(inputs[0] , a_ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a_ , **a_ ) elif isinstance(a_ , a_ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(a_ , return_tensors=a_ , **a_ ) def __lowercase( self : List[str] , a_ : Tuple )-> List[Any]: """simple docstring""" return self.model(**a_ ) def __lowercase( self : str , a_ : Tuple , a_ : Union[str, Any]=None , a_ : str=1 , a_ : Union[str, Any]=True )-> Any: """simple docstring""" # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: SCREAMING_SNAKE_CASE__ : Tuple = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: SCREAMING_SNAKE_CASE__ : List[str] = self.model.config.function_to_apply else: SCREAMING_SNAKE_CASE__ : List[str] = ClassificationFunction.NONE SCREAMING_SNAKE_CASE__ : int = model_outputs['logits'][0] SCREAMING_SNAKE_CASE__ : Tuple = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(a_ ) elif function_to_apply == ClassificationFunction.SOFTMAX: SCREAMING_SNAKE_CASE__ : str = softmax(a_ ) elif function_to_apply == ClassificationFunction.NONE: SCREAMING_SNAKE_CASE__ : Dict = outputs else: raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} SCREAMING_SNAKE_CASE__ : Any = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a_ ) ] if not _legacy: dict_scores.sort(key=lambda a_ : x["score"] , reverse=a_ ) if top_k is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = dict_scores[:top_k] return dict_scores
706
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = scope SCREAMING_SNAKE_CASE__ : str = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2 def __lowercase( self : Optional[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Dict = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , 
initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase( self : List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Optional[int] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : int = model(a_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ ) model.to(a_ ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowercase( self : int )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowercase_ = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 ) def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" pass def __lowercase( self : str )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ , nn.Linear ) ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(a_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss loss.backward() def __lowercase( self : Optional[int] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Tuple = True for model_class in self.all_model_classes: if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) model.gradient_checkpointing_enable() model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss loss.backward() def 
__lowercase( self : Optional[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(a_ ), *get_values(a_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ): SCREAMING_SNAKE_CASE__ : int = problem_type['title'] SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels'] SCREAMING_SNAKE_CASE__ : str = model_class(a_ ) model.to(a_ ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) if problem_type["num_labels"] > 1: SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=a_ ) as warning_list: SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def __lowercase( self : Optional[Any] )-> Optional[int]: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : int )-> Dict: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img() SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowercase( self : Tuple )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 
DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img() SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
636
0
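# --- Illustrative sketch (not part of the dataset rows) ---
# The numerically stable softmax used by the text-classification pipeline
# code above (subtract the row max before exponentiating), demonstrated on
# a toy logits array; only numpy is required.
import numpy as np

logits = np.array([[2.0, -1.0, 0.5]])
maxes = np.max(logits, axis=-1, keepdims=True)  # stabilize before exp
shifted_exp = np.exp(logits - maxes)
probs = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
assert np.isclose(probs.sum(), 1.0)  # rows sum to one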
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)


TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
13
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps`
    steps when 1 or 2 steps can be taken at a time (Fibonacci recurrence)."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
192
0
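# --- Illustrative sketch (not part of the dataset rows) ---
# A quick check of the climbing-stairs recurrence f(n) = f(n-1) + f(n-2)
# from the function above; assumes `climb_stairs` is in scope.
for steps, expected in [(1, 1), (2, 2), (3, 3), (4, 5), (5, 8)]:
    assert climb_stairs(steps) == expected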
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
399
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
399
1
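# --- Illustrative sketch (not part of the dataset rows) ---
# In-graph tokenization with the Keras-layer tokenizer defined above.
# Assumes `transformers`, `tensorflow`, `tensorflow-text` and `keras-nlp`
# are installed and that the class is importable from `transformers`
# (hedged: the export path may vary by library version).
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(["hello world"])  # dict with "input_ids" and "attention_mask"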
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
331
"""simple docstring""" from itertools import product def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[int]: _snake_case = sides_number _snake_case = max_face_number * dice_number _snake_case = [0] * (max_total + 1) _snake_case = 1 _snake_case = range(lowerCAmelCase_ , max_face_number + 1 ) for dice_numbers in product(lowerCAmelCase_ , repeat=lowerCAmelCase_ ): _snake_case = sum(lowerCAmelCase_ ) totals_frequencies[total] += 1 return totals_frequencies def snake_case ( ) -> float: _snake_case = total_frequency_distribution( sides_number=4 , dice_number=9 ) _snake_case = total_frequency_distribution( sides_number=6 , dice_number=6 ) _snake_case = 0 _snake_case = 9 _snake_case = 4 * 9 _snake_case = 6 for peter_total in range(lowerCAmelCase_ , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _snake_case = (4**9) * (6**6) _snake_case = peter_wins_count / total_games_number _snake_case = round(lowerCAmelCase_ , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"{solution() = }")
103
0
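# --- Illustrative sketch (not part of the dataset rows) ---
# Sanity check of the `product`-based frequency distribution used above:
# two four-sided dice give 4**2 = 16 equally likely rolls, with exactly
# one way to roll a total of 2 and four ways to roll a total of 5.
from itertools import product

freqs = [0] * (4 * 2 + 1)
for roll in product(range(1, 5), repeat=2):
    freqs[sum(roll)] += 1
assert sum(freqs) == 16 and freqs[2] == 1 and freqs[5] == 4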
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
703
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class a__ : """simple docstring""" __lowerCamelCase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) __lowerCamelCase = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __lowerCamelCase = field( default=snake_case , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) @dataclass class a__ : """simple docstring""" __lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) __lowerCamelCase = field( default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) __lowerCamelCase = field( default=snake_case , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. If passed, sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __lowerCamelCase = field( default=snake_case , metadata={ 'help': ( 'Whether to pad all samples to the maximum sentence length. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch. More ' 'efficient on GPU but very bad for TPU.' ) } , ) __lowerCamelCase = field( default=snake_case , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __lowerCamelCase = field( default=snake_case , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' if self.train_file is not None: A__ = self.train_file.split("." 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: A__ = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class a__ : """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = True __lowerCamelCase = None __lowerCamelCase = None def __call__( self , lowercase ) -> Tuple: '''simple docstring''' A__ = "label" if "label" in features[0].keys() else "labels" A__ = [feature.pop(lowercase ) for feature in features] A__ = len(lowercase ) A__ = len(features[0]["input_ids"] ) A__ = [ [{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features ] A__ = list(chain(*lowercase ) ) A__ = self.tokenizer.pad( lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()} # Add back labels A__ = torch.tensor(lowercase , dtype=torch.intaa ) return batch def lowerCAmelCase__ ( ) -> List[Any]: '''simple docstring''' A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A__ , A__ , A__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A__ = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE_ ) datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. A__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: A__ = {} if data_args.train_file is not None: A__ = data_args.train_file if data_args.validation_file is not None: A__ = data_args.validation_file A__ = data_args.train_file.split("." )[-1] A__ = load_dataset( SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. A__ = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A__ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. A__ = [F'ending{i}' for i in range(4 )] A__ = "sent1" A__ = "sent2" if data_args.max_seq_length is None: A__ = tokenizer.model_max_length if max_seq_length > 1_0_2_4: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) A__ = 1_0_2_4 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' 
    )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten: regroup the four candidate endings of each example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
626
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only register the PyTorch modeling classes when torch is installed.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
658
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    # Check the type before comparing, so non-integers raise a clear error.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
658
1
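For reference, the hexagonal-number sample in this row reduces to the closed form h(n) = n(2n - 1). A minimal sanity check, assuming the `hexagonal_numbers` name restored in that sample (the obfuscation had erased it):

# First five hexagonal numbers: h(0)..h(4).
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]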
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
563
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to `required_sum` (classic table DP).

    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 9)
    True
    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For each prefix of arr, a sum of zero can be formed by taking no elements.
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum cannot be formed from the empty prefix.
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
563
1
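The subset-sum sample in this row is the standard O(len(arr) * required_sum) dynamic program. A short usage sketch, assuming the `is_subset_sum` name restored above:

# subset[i][j] answers: can some of the first i values sum to j?
values = [3, 34, 4, 12, 5, 2]
assert is_subset_sum(values, 9)       # 4 + 5
assert not is_subset_sum(values, 30)  # no subset reaches 30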
"""simple docstring""" from random import shuffle import tensorflow as tf from numpy import array def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Any ): '''simple docstring''' snake_case_ : Optional[int] = int(__UpperCamelCase ) assert noofclusters < len(__UpperCamelCase ) # Find out the dimensionality snake_case_ : Tuple = len(vectors[0] ) # Will help select random centroids from among the available vectors snake_case_ : Dict = list(range(len(__UpperCamelCase ) ) ) shuffle(__UpperCamelCase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. snake_case_ : Any = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION snake_case_ : Optional[Any] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points snake_case_ : Tuple = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(__UpperCamelCase ) ] ##These nodes will assign the centroid Variables the appropriate ##values snake_case_ : Dict = tf.placeholder("""float64""" , [dim] ) snake_case_ : Optional[Any] = [] for centroid in centroids: cent_assigns.append(tf.assign(__UpperCamelCase , __UpperCamelCase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) snake_case_ : int = [tf.Variable(0 ) for i in range(len(__UpperCamelCase ) )] ##These nodes will assign an assignment Variable the appropriate ##value snake_case_ : Optional[Any] = tf.placeholder("""int32""" ) snake_case_ : Optional[int] = [] for assignment in assignments: cluster_assigns.append(tf.assign(__UpperCamelCase , __UpperCamelCase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input snake_case_ : Any = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors snake_case_ : str = tf.reduce_mean(__UpperCamelCase , 0 ) ##Node for computing Euclidean distances # Placeholders for input snake_case_ : List[Any] = tf.placeholder("""float""" , [dim] ) snake_case_ : Any = tf.placeholder("""float""" , [dim] ) snake_case_ : Union[str, Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__UpperCamelCase , __UpperCamelCase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input snake_case_ : Union[str, Any] = tf.placeholder("""float""" , [noofclusters] ) snake_case_ : List[Any] = tf.argmin(__UpperCamelCase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. snake_case_ : Optional[Any] = tf.initialize_all_variables() # Initialize all variables sess.run(__UpperCamelCase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. 
snake_case_ : int = 1_0_0 for _ in range(__UpperCamelCase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(__UpperCamelCase ) ): snake_case_ : Dict = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. snake_case_ : List[str] = [ sess.run(__UpperCamelCase , feed_dict={va: vect, va: sess.run(__UpperCamelCase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input snake_case_ : Union[str, Any] = sess.run( __UpperCamelCase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(__UpperCamelCase ): # Collect all the vectors assigned to this cluster snake_case_ : Tuple = [ vectors[i] for i in range(len(__UpperCamelCase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location snake_case_ : str = sess.run( __UpperCamelCase , feed_dict={mean_input: array(__UpperCamelCase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments snake_case_ : Union[str, Any] = sess.run(__UpperCamelCase ) snake_case_ : Dict = sess.run(__UpperCamelCase ) return centroids, assignments
58
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A: Tuple = logging.get_logger(__name__) A: int = { "b0": efficientnet.EfficientNetBa, "b1": efficientnet.EfficientNetBa, "b2": efficientnet.EfficientNetBa, "b3": efficientnet.EfficientNetBa, "b4": efficientnet.EfficientNetBa, "b5": efficientnet.EfficientNetBa, "b6": efficientnet.EfficientNetBa, "b7": efficientnet.EfficientNetBa, } A: Optional[Any] = { "b0": { "hidden_dim": 1_2_8_0, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 2_2_4, "dropout_rate": 0.2, "dw_padding": [], }, "b1": { "hidden_dim": 1_2_8_0, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 2_4_0, "dropout_rate": 0.2, "dw_padding": [1_6], }, "b2": { "hidden_dim": 1_4_0_8, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 2_6_0, "dropout_rate": 0.3, "dw_padding": [5, 8, 1_6], }, "b3": { "hidden_dim": 1_5_3_6, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 3_0_0, "dropout_rate": 0.3, "dw_padding": [5, 1_8], }, "b4": { "hidden_dim": 1_7_9_2, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 3_8_0, "dropout_rate": 0.4, "dw_padding": [6], }, "b5": { "hidden_dim": 2_0_4_8, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 4_5_6, "dropout_rate": 0.4, "dw_padding": [1_3, 2_7], }, "b6": { "hidden_dim": 2_3_0_4, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 5_2_8, "dropout_rate": 0.5, "dw_padding": [3_1], }, "b7": { "hidden_dim": 2_5_6_0, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 6_0_0, "dropout_rate": 0.5, "dw_padding": [1_8], }, } def _snake_case ( UpperCamelCase : int ): UpperCAmelCase : List[str] = EfficientNetConfig() UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""hidden_dim"""] UpperCAmelCase : List[Any] = CONFIG_MAP[model_name]["""width_coef"""] UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""depth_coef"""] UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""image_size"""] UpperCAmelCase : Any = CONFIG_MAP[model_name]["""dropout_rate"""] UpperCAmelCase : str = CONFIG_MAP[model_name]["""dw_padding"""] UpperCAmelCase : Any = """huggingface/label-files""" UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" UpperCAmelCase : Optional[Any] = 1000 UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : List[str] = {int(UpperCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase : Optional[int] = idalabel UpperCAmelCase : int = {v: k for k, v in idalabel.items()} return config def _snake_case ( ): UpperCAmelCase : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : List[str] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im def _snake_case ( UpperCamelCase : Optional[int] ): UpperCAmelCase : str = CONFIG_MAP[model_name]["""image_size"""] UpperCAmelCase : Dict = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=UpperCamelCase , ) return preprocessor def _snake_case ( UpperCamelCase : Optional[Any] ): 
UpperCAmelCase : Tuple = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] UpperCAmelCase : Optional[Any] = sorted(set(UpperCamelCase ) ) UpperCAmelCase : List[str] = len(UpperCamelCase ) UpperCAmelCase : List[str] = {b: str(UpperCamelCase ) for b, i in zip(UpperCamelCase , range(UpperCamelCase ) )} UpperCAmelCase : Optional[int] = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: UpperCAmelCase : int = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) UpperCAmelCase : Tuple = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase : List[str] = 
"""efficientnet.""" + item[1] UpperCAmelCase : List[str] = """classifier.weight""" UpperCAmelCase : Union[str, Any] = """classifier.bias""" return key_mapping def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase : int = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase : Union[str, Any] = torch.from_numpy(UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase : Union[str, Any] = torch.from_numpy(UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase : str = torch.from_numpy(np.transpose(UpperCamelCase ) ) else: UpperCAmelCase : Union[str, Any] = torch.from_numpy(UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(UpperCamelCase ) @torch.no_grad() def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): UpperCAmelCase : List[Any] = model_classes[model_name]( include_top=UpperCamelCase , weights="""imagenet""" , input_tensor=UpperCamelCase , input_shape=UpperCamelCase , pooling=UpperCamelCase , classes=1000 , classifier_activation="""softmax""" , ) UpperCAmelCase : Any = original_model.trainable_variables UpperCAmelCase : Optional[int] = original_model.non_trainable_variables UpperCAmelCase : int = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase : Optional[int] = param.numpy() UpperCAmelCase : Optional[Any] = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase : Dict = get_efficientnet_config(UpperCamelCase ) UpperCAmelCase : int = EfficientNetForImageClassification(UpperCamelCase ).eval() UpperCAmelCase : Optional[int] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) UpperCAmelCase : Optional[Any] = rename_keys(UpperCamelCase ) replace_params(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Initialize preprocessor and preprocess input image UpperCAmelCase : Tuple = convert_image_processor(UpperCamelCase ) UpperCAmelCase : str = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase : str = hf_model(**UpperCamelCase ) UpperCAmelCase : List[Any] = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase : Any = False UpperCAmelCase : List[Any] = CONFIG_MAP[model_name]["""image_size"""] UpperCAmelCase : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase : Optional[int] = image.img_to_array(UpperCamelCase ) UpperCAmelCase : Optional[Any] = np.expand_dims(UpperCamelCase , axis=0 ) UpperCAmelCase : List[str] = original_model.predict(UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(UpperCamelCase ): os.mkdir(UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(UpperCamelCase ) preprocessor.save_pretrained(UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." 
) UpperCAmelCase : str = F"efficientnet-{model_name}" preprocessor.push_to_hub(UpperCamelCase ) hf_model.push_to_hub(UpperCamelCase ) if __name__ == "__main__": A: List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="b0", type=str, help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].", ) parser.add_argument( "--pytorch_dump_folder_path", default="hf_model", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--save_model", action="store_true", help="Save model to local") parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") A: Optional[int] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
160
0
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits (Project Euler 25)."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        # Count the digits of f by iterating over its string form.
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
75
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __A : Any = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[str, Any] = ["pixel_values"] def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **_SCREAMING_SNAKE_CASE , )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =size if size is not None else {"""shortest_edge""": 224} lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =crop_size if crop_size is not None else {"""height""": 224, """width""": 224} lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =resample lowerCamelCase_ =do_center_crop lowerCamelCase_ =crop_size lowerCamelCase_ =do_rescale lowerCamelCase_ =rescale_factor lowerCamelCase_ =do_normalize lowerCamelCase_ =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCamelCase_ =image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowerCamelCase_ =int((256 / 224) * size["""shortest_edge"""] ) lowerCamelCase_ =get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ ={"""height""": output_size[0], """width""": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' ) return resize( _SCREAMING_SNAKE_CASE , size=(size_dict["""height"""], size_dict["""width"""]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , )-> BatchFeature: lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize lowerCamelCase_ =resample if resample is not None else self.resample lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean lowerCamelCase_ =image_std if image_std is not None else self.image_std lowerCamelCase_ =size if size is not None else self.size lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size lowerCamelCase_ =get_size_dict(_SCREAMING_SNAKE_CASE , param_name="""crop_size""" ) lowerCamelCase_ =make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
lowerCamelCase_ =[to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: lowerCamelCase_ =[self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: lowerCamelCase_ =[self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowerCamelCase_ =[self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: lowerCamelCase_ =[self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCamelCase_ =[to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCamelCase_ ={"""pixel_values""": images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
75
1
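The Fibonacci sample in this row counts digits with an explicit loop over str(f); `len(str(f))` is the equivalent one-liner. A quick check, assuming the `solution` name restored in that sample:

# F(12) = 144 is the first Fibonacci number with three digits.
assert solution(3) == 12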
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune hidden, private, and scripts directories in place.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
80
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 'wav2vec2' def __init__(self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1e-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=320 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=100 , lowerCamelCase=256 , lowerCamelCase=256 , lowerCamelCase=0.1 , lowerCamelCase="sum" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=(512, 512, 512, 512, 1_500) , lowerCamelCase=(5, 3, 3, 1, 1) , lowerCamelCase=(1, 2, 3, 1, 1) , lowerCamelCase=512 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=3 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase , ): '''simple docstring''' super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase ) _lowerCAmelCase = hidden_size _lowerCAmelCase = feat_extract_norm _lowerCAmelCase = feat_extract_activation _lowerCAmelCase = list(lowerCamelCase ) _lowerCAmelCase = list(lowerCamelCase ) _lowerCAmelCase = list(lowerCamelCase ) _lowerCAmelCase = conv_bias _lowerCAmelCase = num_conv_pos_embeddings _lowerCAmelCase = num_conv_pos_embedding_groups _lowerCAmelCase = len(self.conv_dim ) _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = num_attention_heads _lowerCAmelCase = hidden_dropout _lowerCAmelCase = attention_dropout _lowerCAmelCase = activation_dropout _lowerCAmelCase = feat_proj_dropout _lowerCAmelCase = final_dropout _lowerCAmelCase = layerdrop _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = initializer_range _lowerCAmelCase = vocab_size _lowerCAmelCase = do_stable_layer_norm _lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase = apply_spec_augment _lowerCAmelCase = mask_time_prob _lowerCAmelCase = mask_time_length _lowerCAmelCase = mask_time_min_masks _lowerCAmelCase = mask_feature_prob _lowerCAmelCase = mask_feature_length _lowerCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase = num_codevectors_per_group _lowerCAmelCase = num_codevector_groups _lowerCAmelCase = contrastive_logits_temperature _lowerCAmelCase = feat_quantizer_dropout _lowerCAmelCase = num_negatives _lowerCAmelCase = codevector_dim _lowerCAmelCase = proj_codevector_dim _lowerCAmelCase = diversity_loss_weight # ctc loss _lowerCAmelCase = ctc_loss_reduction _lowerCAmelCase = ctc_zero_infinity # adapter _lowerCAmelCase = add_adapter _lowerCAmelCase = adapter_kernel_size _lowerCAmelCase = adapter_stride _lowerCAmelCase = num_adapter_layers _lowerCAmelCase = output_hidden_size or hidden_size _lowerCAmelCase = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase = list(lowerCamelCase ) _lowerCAmelCase = list(lowerCamelCase ) _lowerCAmelCase = list(lowerCamelCase ) _lowerCAmelCase = xvector_output_dim @property def A__ (self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
156
0
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowerCAmelCase : def SCREAMING_SNAKE_CASE ( self: int ): torch.manual_seed(0 ) lowercase :Union[str, Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) lowercase :List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) lowercase :str = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowercase :Optional[int] = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) lowercase :Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def SCREAMING_SNAKE_CASE ( self: List[str] ): torch.manual_seed(0 ) lowercase :Optional[Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) lowercase :int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) lowercase :Dict = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.4_14 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowercase :Optional[int] = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) lowercase :List[str] = DDPMScheduler( num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) lowercase :int = IFWatermarker() return { "text_encoder": text_encoder, 
"tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def SCREAMING_SNAKE_CASE ( self: List[str] ): lowercase :List[str] = self.get_dummy_components() lowercase :int = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase :Any = self.get_dummy_inputs(_lowerCAmelCase ) lowercase :Dict = inputs["prompt"] lowercase :str = inputs["generator"] lowercase :Union[str, Any] = inputs["num_inference_steps"] lowercase :Union[str, Any] = inputs["output_type"] if "image" in inputs: lowercase :Union[str, Any] = inputs["image"] else: lowercase :List[Any] = None if "mask_image" in inputs: lowercase :Optional[Any] = inputs["mask_image"] else: lowercase :Tuple = None if "original_image" in inputs: lowercase :Optional[int] = inputs["original_image"] else: lowercase :Optional[int] = None lowercase , lowercase :Dict = pipe.encode_prompt(_lowerCAmelCase ) # inputs with prompt converted to embeddings lowercase :Any = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: lowercase :Union[str, Any] = image if mask_image is not None: lowercase :Dict = mask_image if original_image is not None: lowercase :str = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase :Optional[int] = pipe(**_lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_lowerCAmelCase ) lowercase :List[Any] = self.pipeline_class.from_pretrained(_lowerCAmelCase ) pipe_loaded.to(_lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=_lowerCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_lowerCAmelCase , _lowerCAmelCase ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) lowercase :List[str] = self.get_dummy_inputs(_lowerCAmelCase ) lowercase :Optional[Any] = inputs["generator"] lowercase :Union[str, Any] = inputs["num_inference_steps"] lowercase :str = inputs["output_type"] # inputs with prompt converted to embeddings lowercase :Tuple = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: lowercase :int = image if mask_image is not None: lowercase :Dict = mask_image if original_image is not None: lowercase :str = original_image lowercase :Union[str, Any] = pipe_loaded(**_lowerCAmelCase )[0] lowercase :Optional[Any] = np.abs(to_np(_lowerCAmelCase ) - to_np(_lowerCAmelCase ) ).max() self.assertLess(_lowerCAmelCase , 1e-4 ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :Union[str, Any] = self.get_dummy_components() lowercase :Tuple = self.pipeline_class(**_lowerCAmelCase ) pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase :List[str] = self.get_dummy_inputs(_lowerCAmelCase ) lowercase :Optional[int] = pipe(**_lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_lowerCAmelCase ) lowercase :Any = self.pipeline_class.from_pretrained(_lowerCAmelCase ) pipe_loaded.to(_lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=_lowerCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowercase :Tuple = self.get_dummy_inputs(_lowerCAmelCase ) lowercase :List[str] = pipe_loaded(**_lowerCAmelCase )[0] lowercase :List[str] = np.abs(to_np(_lowerCAmelCase ) - to_np(_lowerCAmelCase ) ).max() self.assertLess(_lowerCAmelCase , 1e-4 )
453
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( lowerCAmelCase): _a = ['''image_processor''', '''tokenizer'''] _a = '''LayoutLMv2ImageProcessor''' _a = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''') def __init__( self: int , _lowerCAmelCase: Any=None , _lowerCAmelCase: List[Any]=None , **_lowerCAmelCase: Optional[Any] ): if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _lowerCAmelCase , ) lowercase :Optional[Any] = kwargs.pop("feature_extractor" ) lowercase :Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _lowerCAmelCase: Union[List[List[int]], List[List[List[int]]]] = None , _lowerCAmelCase: Optional[Union[List[int], List[List[int]]]] = None , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase: Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: int = 0 , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[bool] = None , _lowerCAmelCase: Optional[bool] = None , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , **_lowerCAmelCase: Tuple , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes " "if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." 
) # first, apply the image processor lowercase :int = self.image_processor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowercase :int = [text] # add batch dimension (as the image processor always adds a batch dimension) lowercase :int = features["words"] lowercase :Optional[Any] = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , ) # add pixel values lowercase :Any = features.pop("pixel_values" ) if return_overflowing_tokens is True: lowercase :Any = self.get_overflowing_images(_lowerCAmelCase , encoded_inputs["overflow_to_sample_mapping"] ) lowercase :Optional[int] = images return encoded_inputs def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: Tuple , _lowerCAmelCase: List[Any] ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowercase :Dict = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_lowerCAmelCase ) != len(_lowerCAmelCase ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F" {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}" ) return images_with_overflow def SCREAMING_SNAKE_CASE ( self: Dict , *_lowerCAmelCase: str , **_lowerCAmelCase: List[Any] ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: str , *_lowerCAmelCase: Tuple , **_lowerCAmelCase: Union[str, Any] ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE ( self: List[str] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _lowerCAmelCase , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self: List[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _lowerCAmelCase , ) return self.image_processor
453
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
63
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrapper over pre-tokenized sequences for the distillation scripts."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # Leave room for the start/end special tokens re-added below.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
491
0
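Several code fields in this section (jukebox, groupvit, xmod) use transformers' lazy-import __init__ pattern. For readers unfamiliar with it, a simplified, self-contained sketch of the same idea (not the library's actual `_LazyModule` implementation): attribute access triggers the real submodule import instead of paying for it at package import time.

import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until an exported attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)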
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface to the files of a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
721
import itertools
import math


def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All other primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
357
0
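A quick sanity check for the Project Euler 7 helpers in this row, assuming the three functions above are in scope under their restored names:

assert is_prime(13) and not is_prime(15)
assert solution(6) == 13  # the 6th prime
print(solution())         # 104743, the 10001st prime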
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
301
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A proxy object so that ``with lock.acquire():`` releases the lock on exit."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function. It is only NOT None if the object currently
        # holds the lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter implements the nested locking mechanism: the lock
        # is only released when this value drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :class:`Timeout` error."""
        if timeout is None:
            timeout = self.timeout

        # Increment the counter right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Releases the file lock."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses :func:`msvcrt.locking` to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
74
0
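A minimal usage sketch for the vendored file lock above; the paths are illustrative:

lock = FileLock("/tmp/demo.txt.lock", timeout=5)
try:
    with lock:  # blocks up to 5 seconds, then raises Timeout
        with open("/tmp/demo.txt", "a") as f:
            f.write("one writer at a time\n")
except Timeout:
    print("could not acquire the lock within 5 seconds")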
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def A_ ( __lowercase , __lowercase , __lowercase , __lowercase , ): UpperCamelCase_ , UpperCamelCase_ : int =coefficient_matrix.shape UpperCamelCase_ , UpperCamelCase_ : str =constant_matrix.shape if rowsa != colsa: UpperCamelCase_ : str =F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(__lowercase ) if colsa != 1: UpperCamelCase_ : List[Any] =F'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(__lowercase ) if rowsa != rowsa: UpperCamelCase_ : Any =( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' F'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(__lowercase ) if len(__lowercase ) != rowsa: UpperCamelCase_ : Dict =( 'Number of initial values must be equal to number of rows in coefficient ' F'''matrix but received {len(__lowercase )} and {rowsa}''' ) raise ValueError(__lowercase ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) UpperCamelCase_ : NDArray[floataa] =np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) UpperCamelCase_ , UpperCamelCase_ : Optional[Any] =table.shape strictly_diagonally_dominant(__lowercase ) # Iterates the whole matrix for given number of times for _ in range(__lowercase ): UpperCamelCase_ : List[Any] =[] for row in range(__lowercase ): UpperCamelCase_ : Union[str, Any] =0 for col in range(__lowercase ): if col == row: UpperCamelCase_ : List[str] =table[row][col] elif col == cols - 1: UpperCamelCase_ : Tuple =table[row][col] else: temp += (-1) * table[row][col] * init_val[col] UpperCamelCase_ : Tuple =(temp + val) / denom new_val.append(__lowercase ) UpperCamelCase_ : str =new_val return [float(__lowercase ) for i in new_val] def A_ ( __lowercase ): UpperCamelCase_ , UpperCamelCase_ : List[Any] =table.shape UpperCamelCase_ : List[Any] =True for i in range(0 , __lowercase ): UpperCamelCase_ : int =0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
395
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json' ), } class a__ ( A__ ): UpperCAmelCase__ = '''dpr''' def __init__( self :Dict , _lowerCamelCase :Optional[Any]=30_522 , _lowerCamelCase :Tuple=768 , _lowerCamelCase :List[Any]=12 , _lowerCamelCase :List[str]=12 , _lowerCamelCase :Dict=3_072 , _lowerCamelCase :Tuple="gelu" , _lowerCamelCase :Union[str, Any]=0.1 , _lowerCamelCase :Optional[Any]=0.1 , _lowerCamelCase :int=512 , _lowerCamelCase :Optional[Any]=2 , _lowerCamelCase :List[str]=0.02 , _lowerCamelCase :List[Any]=1E-1_2 , _lowerCamelCase :Union[str, Any]=0 , _lowerCamelCase :str="absolute" , _lowerCamelCase :int = 0 , **_lowerCamelCase :Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) UpperCamelCase_ : Optional[Any] =vocab_size UpperCamelCase_ : int =hidden_size UpperCamelCase_ : List[Any] =num_hidden_layers UpperCamelCase_ : str =num_attention_heads UpperCamelCase_ : Union[str, Any] =hidden_act UpperCamelCase_ : str =intermediate_size UpperCamelCase_ : Dict =hidden_dropout_prob UpperCamelCase_ : List[Any] =attention_probs_dropout_prob UpperCamelCase_ : Union[str, Any] =max_position_embeddings UpperCamelCase_ : Dict =type_vocab_size UpperCamelCase_ : Union[str, Any] =initializer_range UpperCamelCase_ : Dict =layer_norm_eps UpperCamelCase_ : Optional[int] =projection_dim UpperCamelCase_ : Dict =position_embedding_type
395
1
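A worked example for the Jacobi solver in this row on a strictly diagonally dominant 2x2 system; the iterates approach the exact solution (12/11, -26/11):

import numpy as np

coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
approx = jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0], iterations=25)
print(approx)  # close to [1.0909..., -2.3636...]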
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
20
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
683
0
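A small sketch instantiating the configuration defined in this row. The `transformers` import assumes the library is installed; `hidden_size` resolves through the class's `attribute_map` alias to `d_model`:

from transformers import WhisperConfig

config = WhisperConfig(d_model=256, encoder_layers=2, decoder_layers=2)
print(config.hidden_size)           # 256, aliased to d_model via attribute_map
print(config.max_source_positions)  # 1500 (default)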
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
712
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
596
0
import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
97
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
143
0
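A short sketch for the configuration in this row, assuming an installed `transformers`:

from transformers import CamembertConfig

config = CamembertConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
print(config.model_type)  # 'camembert'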
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
712
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the two arrays merged and sorted."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
694
0
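Two quick checks for the `median_of_two_arrays` above, covering odd and even merged lengths:

assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5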
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger() @dataclass class lowerCAmelCase_ : """simple docstring""" UpperCAmelCase__ = 42 UpperCAmelCase__ = field(default_factory=_a ) UpperCAmelCase__ = field(default_factory=_a ) def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: __UpperCamelCase = len(list(m.modules() ) ) == 1 or isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ) or isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNormad ) if has_not_submodules: self.traced.append(_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE ) -> str: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_SCREAMING_SNAKE_CASE ) [x.remove() for x in self.handles] return self @property def __lowercase( self ) -> List[str]: return list(filter(lambda _SCREAMING_SNAKE_CASE : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : """simple docstring""" UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 1 UpperCAmelCase__ = field(default_factory=_a ) UpperCAmelCase__ = field(default_factory=_a ) UpperCAmelCase__ = True def __call__( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]: __UpperCamelCase = Tracker(self.dest )(_SCREAMING_SNAKE_CASE ).parametrized __UpperCamelCase = Tracker(self.src )(_SCREAMING_SNAKE_CASE ).parametrized __UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.src_skip , _SCREAMING_SNAKE_CASE ) ) __UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.dest_skip , _SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ) and self.raise_if_mismatch: raise Exception( f"""Numbers of operations are different. 
Source module has {len(_SCREAMING_SNAKE_CASE )} operations while""" f""" destination module has {len(_SCREAMING_SNAKE_CASE )}.""" ) for dest_m, src_m in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) class lowerCAmelCase_ ( nn.Module ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE ) -> Dict: super().__init__() __UpperCamelCase = [] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), f"""Unexpected layer name {k}""" __UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) + 1 feature_blocks.append((f"""res{block_index}""", v) ) __UpperCamelCase = nn.ModuleDict(_SCREAMING_SNAKE_CASE ) def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> Tuple: return get_trunk_forward_outputs( _SCREAMING_SNAKE_CASE , out_feat_keys=_SCREAMING_SNAKE_CASE , feature_blocks=self._feature_blocks , ) class lowerCAmelCase_ ( _a ): """simple docstring""" def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> str: __UpperCamelCase = x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Callable[[], Tuple[nn.Module, Dict]]: if x not in self: __UpperCamelCase = self.convert_name_to_timm(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = partial(lambda: (timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ).eval(), None) ) else: __UpperCamelCase = super().__getitem__(_SCREAMING_SNAKE_CASE ) return val class lowerCAmelCase_ ( _a ): """simple docstring""" def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Callable[[], nn.Module]: if "seer" in x and "in1k" not in x: __UpperCamelCase = RegNetModel else: __UpperCamelCase = RegNetForImageClassification return val def _a ( __lowercase , __lowercase , __lowercase ) -> List[Any]: """simple docstring""" for from_key, to_key in keys: __UpperCamelCase = from_state_dict[from_key].clone() print(F"""Copied key={from_key} to={to_key}""" ) return to_state_dict def _a ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ) -> Optional[Any]: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): __UpperCamelCase , __UpperCamelCase = from_model_func() __UpperCamelCase = our_model_func(_lowerCAmelCase ).eval() __UpperCamelCase = ModuleTransfer(src=_lowerCAmelCase , dest=_lowerCAmelCase , raise_if_mismatch=_lowerCAmelCase ) __UpperCamelCase = torch.randn((1, 3, 224, 224) ) module_transfer(_lowerCAmelCase ) if from_state_dict is not None: __UpperCamelCase = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: __UpperCamelCase = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] __UpperCamelCase = manually_copy_vissl_head(_lowerCAmelCase , our_model.state_dict() , _lowerCAmelCase ) our_model.load_state_dict(_lowerCAmelCase ) __UpperCamelCase = our_model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) __UpperCamelCase = ( our_outputs.logits if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else our_outputs.last_hidden_state ) __UpperCamelCase = from_model(_lowerCAmelCase ) __UpperCamelCase = from_output[-1] if type(_lowerCAmelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name 
and "in1k" in name: __UpperCamelCase = our_outputs.hidden_states[-1] assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=_lowerCAmelCase , ) __UpperCamelCase = 224 if 'seer' not in name else 384 # we can use the convnext one __UpperCamelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=_lowerCAmelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=_lowerCAmelCase , ) print(F"""Pushed {name}""" ) def _a ( __lowercase , __lowercase = None , __lowercase = True ) -> str: """simple docstring""" __UpperCamelCase = 'imagenet-1k-id2label.json' __UpperCamelCase = 1000 __UpperCamelCase = (1, num_labels) __UpperCamelCase = 'huggingface/label-files' __UpperCamelCase = num_labels __UpperCamelCase = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) ) , 'r' ) ) __UpperCamelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} __UpperCamelCase = idalabel __UpperCamelCase = {v: k for k, v in idalabel.items()} __UpperCamelCase = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase ) __UpperCamelCase = { 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), 'regnet-y-016': 
ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ), } __UpperCamelCase = NameToOurModelFuncMap() __UpperCamelCase = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(__lowercase , __lowercase ) -> Tuple[nn.Module, Dict]: __UpperCamelCase = torch.hub.load_state_dict_from_url(_lowerCAmelCase , model_dir=str(_lowerCAmelCase ) , map_location='cpu' ) __UpperCamelCase = model_func() # check if we have a head, if yes add it __UpperCamelCase = files['classy_state_dict']['base_model']['model'] __UpperCamelCase = model_state_dict['trunk'] model.load_state_dict(_lowerCAmelCase ) return model.eval(), model_state_dict["heads"] # pretrained __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __UpperCamelCase = partial( _lowerCAmelCase , 
'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) __UpperCamelCase = partial( _lowerCAmelCase , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( _lowerCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _lowerCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) return config, expected_shape if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help=( 'The name of the model you wish to convert, it must be one of the supported regnet* architecture,' ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=Path, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=True, type=bool, required=False, help='If True, push model and image processor to the hub.', ) _snake_case = parser.parse_args() _snake_case = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
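For reference, a typical invocation of the conversion script above might look like the following sketch; the script filename is an assumption, but the flags mirror the argparse definition above:

# python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted
# Omitting --model_name converts every entry in the names_to_config table.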
383
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    # Integer division is exact here: n*(n+1)*(2n+1) is always divisible by 6
    # and n*(n+1) is always even, so no float rounding can creep in for large n.
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
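A quick brute-force cross-check of the closed-form solution above; 25164150 is the known answer for n = 100:

def brute_force_solution(n: int = 100) -> int:
    # Compute the same difference directly from the definitions.
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert brute_force_solution(100) == solution(100) == 25164150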
371
0
"""simple docstring""" import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process _UpperCAmelCase = logging.getLogger(__name__) _UpperCAmelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) _UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a : UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase__ )} , ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) UpperCamelCase : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) def lowerCamelCase__ ( self : List[Any] ) -> List[str]: '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class a : UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) UpperCamelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={'help': 'The input training data file (a text file).'} ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , ) UpperCamelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , ) UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) UpperCamelCase : Optional[int] = field( default=5 , metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' } , ) UpperCamelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated. Default to the max input length of the model.' ) } , ) UpperCamelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) UpperCamelCase : float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' if self.train_file is not None: SCREAMING_SNAKE_CASE_: Any =self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: SCREAMING_SNAKE_CASE_: List[str] =self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def __magic_name__ ( lowercase , lowercase ): with open(lowercase , """r""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE_: int =[json.loads(lowercase ) for line in f.read().splitlines() if (len(lowercase ) > 0 and not line.isspace())] assert len(lowercase ) == len(lowercase ) SCREAMING_SNAKE_CASE_: Union[str, Any] ={c: dataset[c] for c in dataset.column_names} SCREAMING_SNAKE_CASE_: List[str] =refs return Dataset.from_dict(lowercase ) def __magic_name__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
SCREAMING_SNAKE_CASE_: Any =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE_: Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE_: Dict =parser.parse_args_into_dataclasses() # Detecting last checkpoint. SCREAMING_SNAKE_CASE_: Union[str, Any] =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE_: Tuple =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
SCREAMING_SNAKE_CASE_: List[str] =load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): SCREAMING_SNAKE_CASE_: Optional[Any] =load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''train[:{data_args.validation_split_percentage}%]''' , ) SCREAMING_SNAKE_CASE_: Optional[int] =load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'''train[{data_args.validation_split_percentage}%:]''' , ) else: SCREAMING_SNAKE_CASE_: List[Any] ={} if data_args.train_file is not None: SCREAMING_SNAKE_CASE_: List[str] =data_args.train_file if data_args.validation_file is not None: SCREAMING_SNAKE_CASE_: Union[str, Any] =data_args.validation_file SCREAMING_SNAKE_CASE_: Union[str, Any] =data_args.train_file.split(""".""" )[-1] if extension == "txt": SCREAMING_SNAKE_CASE_: Union[str, Any] ="""text""" SCREAMING_SNAKE_CASE_: Optional[int] =load_dataset(lowercase , data_files=lowercase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE_: Any ={ """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: SCREAMING_SNAKE_CASE_: int =AutoConfig.from_pretrained(model_args.config_name , **lowercase ) elif model_args.model_name_or_path: SCREAMING_SNAKE_CASE_: Optional[Any] =AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase ) else: SCREAMING_SNAKE_CASE_: List[str] =CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(f'''New config: {config}''' ) SCREAMING_SNAKE_CASE_: List[Any] ={ """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase ) elif model_args.model_name_or_path: SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: SCREAMING_SNAKE_CASE_: Dict =AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) SCREAMING_SNAKE_CASE_: str =AutoModelForMaskedLM.from_config(lowercase ) model.resize_token_embeddings(len(lowercase ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: SCREAMING_SNAKE_CASE_: str =datasets["""train"""].column_names else: SCREAMING_SNAKE_CASE_: List[Any] =datasets["""validation"""].column_names SCREAMING_SNAKE_CASE_: Dict ="""text""" if """text""" in column_names else column_names[0] SCREAMING_SNAKE_CASE_: List[str] ="""max_length""" if data_args.pad_to_max_length else False def tokenize_function(lowercase ): # Remove empty lines SCREAMING_SNAKE_CASE_: List[str] =[line for line in examples["""text"""] if len(lowercase ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=lowercase , truncation=lowercase , max_length=data_args.max_seq_length ) SCREAMING_SNAKE_CASE_: int =datasets.map( lowercase , batched=lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: SCREAMING_SNAKE_CASE_: Dict =add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: SCREAMING_SNAKE_CASE_: List[Any] =add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer SCREAMING_SNAKE_CASE_: Dict =data_args.train_ref_file or data_args.validation_ref_file if has_ref: SCREAMING_SNAKE_CASE_: Tuple =False # Data collator # This one will take care of randomly masking the tokens. SCREAMING_SNAKE_CASE_: Tuple =DataCollatorForWholeWordMask(tokenizer=lowercase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer SCREAMING_SNAKE_CASE_: Optional[Any] =Trainer( model=lowercase , args=lowercase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , ) # Training if training_args.do_train: if last_checkpoint is not None: SCREAMING_SNAKE_CASE_: Optional[Any] =last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): SCREAMING_SNAKE_CASE_: Dict =model_args.model_name_or_path else: SCREAMING_SNAKE_CASE_: Union[str, Any] =None SCREAMING_SNAKE_CASE_: str =trainer.train(resume_from_checkpoint=lowercase ) trainer.save_model() # Saves the tokenizer too for easy upload SCREAMING_SNAKE_CASE_: Any =os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(lowercase , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation SCREAMING_SNAKE_CASE_: List[str] ={} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE_: List[Any] =trainer.evaluate() SCREAMING_SNAKE_CASE_: Dict =math.exp(eval_output["""eval_loss"""] ) SCREAMING_SNAKE_CASE_: Tuple =perplexity SCREAMING_SNAKE_CASE_: List[str] =os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(lowercase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): logger.info(f''' {key} = {value}''' 
) writer.write(f'''{key} = {value}\n''' ) return results def __magic_name__ ( lowercase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
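A representative launch command for the whole-word-masking script above; all paths and the model name are placeholders:

# python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file path/to/train.txt \
#     --train_ref_file path/to/train_ref.txt \
#     --do_train \
#     --output_dir ./mlm-wwm-output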
720
"""simple docstring""" def __magic_name__ ( lowercase ): if upper_limit < 0: raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" ) SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 SCREAMING_SNAKE_CASE_: Any =1 if upper_limit > 0: SCREAMING_SNAKE_CASE_: List[str] =1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowercase ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: _UpperCAmelCase = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(f"""The Catalan numbers from 0 through {N} are:""") print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
36
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict=7 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=18 , SCREAMING_SNAKE_CASE__ : List[str]=30 , SCREAMING_SNAKE_CASE__ : Dict=400 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=[0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , ) -> int: lowerCAmelCase__ = size if size is not None else {"height": 224, "width": 224} lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 18, "width": 18} lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = image_size lowerCAmelCase__ = min_resolution lowerCAmelCase__ = max_resolution lowerCAmelCase__ = do_resize lowerCAmelCase__ = size lowerCAmelCase__ = do_center_crop lowerCAmelCase__ = crop_size lowerCAmelCase__ = do_normalize lowerCAmelCase__ = image_mean lowerCAmelCase__ = image_std lowerCAmelCase__ = do_convert_rgb def a ( self : Union[str, Any] ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Optional[Any]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCAmelCase__ = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCAmelCase__ = [] for i in range(self.batch_size ): lowerCAmelCase__ , lowerCAmelCase__ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCAmelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCAmelCase__ = [torch.from_numpy(SCREAMING_SNAKE_CASE__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = ChineseCLIPImageProcessor if is_vision_available() else None def a ( self : Optional[Any] ) -> List[str]: lowerCAmelCase__ = ChineseCLIPImageProcessingTester(self , do_center_crop=SCREAMING_SNAKE_CASE__ ) @property def a ( self : int ) -> str: return 
self.image_processor_tester.prepare_image_processor_dict() def a ( self : Tuple ) -> int: lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_resize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_center_crop" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "center_crop" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_normalize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_mean" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_std" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_convert_rgb" ) ) def a ( self : Dict ) -> int: lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 224, "width": 224} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def a ( self : str ) -> Optional[int]: pass def a ( self : Tuple ) -> Union[str, Any]: # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def a ( self : Union[str, Any] ) -> Dict: # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def a ( self : Optional[Any] ) -> Any: # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ = 
self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = ChineseCLIPImageProcessor if is_vision_available() else None def a ( self : Optional[int] ) -> Union[str, Any]: lowerCAmelCase__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = 3 @property def a ( self : int ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def a ( self : List[str] ) -> Dict: lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_resize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "size" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_center_crop" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "center_crop" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_normalize" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_mean" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "image_std" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "do_convert_rgb" ) ) def a ( self : Any ) -> Optional[int]: pass def a ( self : str ) -> Union[str, Any]: # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase__ = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
61
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = use_labels A__ = scope def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : 
Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict: '''simple docstring''' A__ = True A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = BertGenerationDecoder(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = 
(BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = BertGenerationEncoderTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = '''bert''' self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 1_024]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 50_358]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]) 
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
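To exercise this test module inside a transformers checkout, a pytest invocation along these lines should work; the path is an assumption about the repository layout:

# python -m pytest tests/models/bert_generation/test_modeling_bert_generation.py -v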
87
0
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer __UpperCAmelCase : List[Any] = logging.get_logger(__name__) __UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __UpperCAmelCase : List[str] = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } __UpperCAmelCase : List[Any] = { "Salesforce/codegen-350M-mono": 2_048, } class __snake_case ( __a ): '''simple docstring''' lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ = CodeGenTokenizer def __init__( self : str , A : List[str]=None , A : str=None , A : Any=None , A : List[Any]="<|endoftext|>" , A : int="<|endoftext|>" , A : List[Any]="<|endoftext|>" , A : int=False , **A : List[Any] , ): super().__init__( a_ , a_ , tokenizer_file=a_ , unk_token=a_ , bos_token=a_ , eos_token=a_ , add_prefix_space=a_ , **a_ , ) if kwargs.pop("""add_bos_token""" , a_ ): __snake_case: Union[str, Any] = kwargs.pop("""name_or_path""" , """""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" f'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n''' f'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n''' """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) __snake_case: Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , a_ ) != add_prefix_space: __snake_case: str = getattr(a_ , pre_tok_state.pop("""type""" ) ) __snake_case: Dict = add_prefix_space __snake_case: List[Any] = pre_tok_class(**a_ ) __snake_case: List[str] = add_prefix_space def UpperCAmelCase__ ( self : List[Any] , *A : Tuple , **A : Optional[int] ): __snake_case: Union[str, Any] = kwargs.get("""is_split_into_words""" , a_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a_ , **a_ ) def UpperCAmelCase__ ( self : Optional[int] , *A : Dict , **A : Optional[int] ): __snake_case: int = kwargs.get("""is_split_into_words""" , a_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*a_ , **a_ ) def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[str] = None ): __snake_case: int = self._tokenizer.model.save(a_ , name=a_ ) return tuple(a_ ) def UpperCAmelCase__ ( self : List[str] , A : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , A : bool = False , A : bool = None , A : Optional[List[str]] = None , **A : Optional[int] , ): __snake_case: str = super().decode( token_ids=a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ , **a_ , ) if truncate_before_pattern is not None and len(a_ ) > 0: __snake_case: int = self.truncate(a_ , a_ ) return decoded_text def UpperCAmelCase__ ( self : List[Any] , A : List[str] , A : str ): def find_re(A : Union[str, Any] , A : List[Any] , A : List[str] ): __snake_case: str = pattern.search(a_ , a_ ) return m.start() if m else -1 __snake_case: Optional[int] = [re.compile(a_ , re.MULTILINE ) for pattern in truncate_before_pattern] __snake_case: List[str] = list(re.finditer("""^print""" , a_ , re.MULTILINE ) ) if len(a_ ) > 1: __snake_case: List[str] = completion[: prints[1].start()] __snake_case: List[Any] = list(re.finditer("""^def""" , a_ , re.MULTILINE ) ) if len(a_ ) > 1: __snake_case: Optional[int] = completion[: defs[1].start()] __snake_case: str = 0 __snake_case: str = [ pos for pos in [find_re(a_ , a_ , a_ ) for terminal in terminals] if pos != -1 ] if len(a_ ) > 0: return completion[: min(a_ )] else: return completion
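A sketch of how the truncation hook above is meant to be used at decode time; the sample text and the pattern list are illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
sample = tokenizer("def hello():\n    return 1\n\n\nhello()", return_tensors="pt")
# truncate_before_pattern cuts the decoded text at the first match of any
# pattern, via the truncate() helper defined above.
text = tokenizer.decode(sample.input_ids[0], truncate_before_pattern=["\n\n\n"])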
706
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask so the T5 blocks can consume it
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
155
0
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCamelCase = "fp16" self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCamelCase = "fp16" self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" # pass variant but use the non-variant filenames UpperCamelCase = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] UpperCamelCase = "fp16" self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", 
"unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] UpperCamelCase = "fp16" self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] UpperCamelCase = "fp16" self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" # pass variant but use the non-variant filenames UpperCamelCase = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] UpperCamelCase = "fp16" self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" UpperCamelCase = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] UpperCamelCase = "fp16" self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE ) )
606
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING __a : List[Any] = logging.get_logger(__name__) @add_end_docstrings(snake_case__ ) class __UpperCAmelCase ( snake_case__ ): """simple docstring""" def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) requires_backends(self , "decord" ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> List[str]: """simple docstring""" UpperCamelCase = {} if frame_sampling_rate is not None: UpperCamelCase = frame_sampling_rate if num_frames is not None: UpperCamelCase = num_frames UpperCamelCase = {} if top_k is not None: UpperCamelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 ) -> List[Any]: """simple docstring""" if num_frames is None: UpperCamelCase = self.model.config.num_frames if video.startswith("http://" ) or video.startswith("https://" ): UpperCamelCase = BytesIO(requests.get(SCREAMING_SNAKE_CASE ).content ) UpperCamelCase = VideoReader(SCREAMING_SNAKE_CASE ) videoreader.seek(0 ) UpperCamelCase = 0 UpperCamelCase = num_frames * frame_sampling_rate - 1 UpperCamelCase = np.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num=SCREAMING_SNAKE_CASE , dtype=np.intaa ) UpperCamelCase = videoreader.get_batch(SCREAMING_SNAKE_CASE ).asnumpy() UpperCamelCase = list(SCREAMING_SNAKE_CASE ) UpperCamelCase = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=self.framework ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" UpperCamelCase = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> Optional[Any]: """simple docstring""" if top_k > self.model.config.num_labels: UpperCamelCase = self.model.config.num_labels if self.framework == "pt": UpperCamelCase = model_outputs.logits.softmax(-1 )[0] UpperCamelCase , UpperCamelCase = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCamelCase = scores.tolist() UpperCamelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
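A minimal usage sketch of the video-classification pipeline defined above; the checkpoint name is a real Kinetics model, but the video URL is a placeholder:

from transformers import pipeline

classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = classifier("https://example.com/archery.mp4", top_k=3, num_frames=16)
# -> a list of {"score": ..., "label": ...} dicts, as built in postprocess() above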
606
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
721
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case = { """configuration_distilbert""": [ """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DistilBertConfig""", """DistilBertOnnxConfig""", ], """tokenization_distilbert""": ["""DistilBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""DistilBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DistilBertForMaskedLM""", """DistilBertForMultipleChoice""", """DistilBertForQuestionAnswering""", """DistilBertForSequenceClassification""", """DistilBertForTokenClassification""", """DistilBertModel""", """DistilBertPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDistilBertForMaskedLM""", """TFDistilBertForMultipleChoice""", """TFDistilBertForQuestionAnswering""", """TFDistilBertForSequenceClassification""", """TFDistilBertForTokenClassification""", """TFDistilBertMainLayer""", """TFDistilBertModel""", """TFDistilBertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """FlaxDistilBertForMaskedLM""", """FlaxDistilBertForMultipleChoice""", """FlaxDistilBertForQuestionAnswering""", """FlaxDistilBertForSequenceClassification""", """FlaxDistilBertForTokenClassification""", """FlaxDistilBertModel""", """FlaxDistilBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import 
sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
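For orientation, a hedged consumer-side sketch of what the `_LazyModule` pattern above buys: importing the subpackage is cheap, and the heavy framework-specific submodule is only imported when an attribute is first touched. The import path assumes the standard transformers package layout, which is not shown in this dump.

from transformers.models import distilbert as distilbert_pkg  # cheap: no torch/tf/flax imported yet

config_cls = distilbert_pkg.DistilBertConfig  # the real submodule import happens here, on first access
print(config_cls.model_type)  # "distilbert"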
117
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = """▁""" _lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} _lowerCamelCase = { """vocab_file""": { """facebook/mbart-large-50-one-to-many-mmt""": ( """https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model""" ), } } _lowerCamelCase = { """facebook/mbart-large-50-one-to-many-mmt""": 1024, } # fmt: off _lowerCamelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""] class _snake_case (__SCREAMING_SNAKE_CASE): __A : List[Any] =VOCAB_FILES_NAMES __A : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : Dict =PRETRAINED_VOCAB_FILES_MAP __A : Dict =["input_ids", "attention_mask"] __A : List[int] =[] __A : List[int] =[] def __init__( self ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case="</s>" ,_snake_case="</s>" ,_snake_case="<s>" ,_snake_case="<unk>" ,_snake_case="<pad>" ,_snake_case="<mask>" ,_snake_case = None ,**_snake_case ,): # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : Optional[Any] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else mask_token UpperCAmelCase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase_ : Optional[Any] = kwargs.get("additional_special_tokens" ,[] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_snake_case ,tgt_lang=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,mask_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,) UpperCAmelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) UpperCAmelCase_ : List[str] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token UpperCAmelCase_ : Optional[int] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : Union[str, Any] = len(self.sp_model ) UpperCAmelCase_ : int = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case ) } UpperCAmelCase_ : List[Any] = {v: k for k, v in self.lang_code_to_id.items()} UpperCAmelCase_ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCAmelCase_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCAmelCase_ : Optional[int] = src_lang if src_lang is not None else "en_XX" UpperCAmelCase_ : List[Any] = self.lang_code_to_id[self._src_lang] UpperCAmelCase_ : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase__ ( self ): return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase__ ( self ): return self._src_lang @src_lang.setter def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ): UpperCAmelCase_ : List[Any] = self.__dict__.copy() UpperCAmelCase_ : List[str] = None return state def __setstate__( self ,_snake_case ): UpperCAmelCase_ : Union[str, Any] = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self ,_snake_case ): return self.sp_model.encode(_snake_case ,out_type=_snake_case ) def UpperCamelCase__ ( self ,_snake_case ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase_ : Dict = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase__ ( self ,_snake_case ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : int = "" UpperCAmelCase_ : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_snake_case ) + token UpperCAmelCase_ : int = True UpperCAmelCase_ : List[Any] = [] else: current_sub_tokens.append(_snake_case ) UpperCAmelCase_ : Any = False out_string += self.sp_model.decode(_snake_case ) return out_string.strip() def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): if not os.path.isdir(_snake_case ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Optional[Any] = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if 
os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case ,"wb" ) as fi: UpperCAmelCase_ : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case ) UpperCAmelCase_ : Union[str, Any] = [1] * len(self.prefix_tokens ) UpperCAmelCase_ : str = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_snake_case )) + suffix_ones return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase_ : str = src_lang UpperCAmelCase_ : Dict = self(_snake_case ,add_special_tokens=_snake_case ,return_tensors=_snake_case ,**_snake_case ) UpperCAmelCase_ : List[str] = self.convert_tokens_to_ids(_snake_case ) UpperCAmelCase_ : Optional[Any] = tgt_lang_id return inputs def UpperCamelCase__ ( self ,_snake_case ,_snake_case = "en_XX" ,_snake_case = None ,_snake_case = "ro_RO" ,**_snake_case ,): UpperCAmelCase_ : Optional[Any] = src_lang UpperCAmelCase_ : int = tgt_lang return super().prepare_seqaseq_batch(_snake_case ,_snake_case ,**_snake_case ) def UpperCamelCase__ ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase__ ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : List[Any] = self.lang_code_to_id[src_lang] UpperCAmelCase_ : List[str] = [self.cur_lang_code_id] UpperCAmelCase_ : Dict = [self.eos_token_id] def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : int = self.lang_code_to_id[tgt_lang] UpperCAmelCase_ : Optional[Any] = [self.cur_lang_code_id] UpperCAmelCase_ : int = [self.eos_token_id]
71
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class UpperCAmelCase_:
    """simple docstring"""

    pass
173
0
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
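A quick sanity check on the predicate above, not part of the original file:

# The perfect numbers below 10_000 are exactly 6, 28, 496 and 8128.
assert [n for n in range(1, 10_000) if perfect(n)] == [6, 28, 496, 8128]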
5
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
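A minimal usage sketch for the restored config class; since IBertConfig is part of the public transformers API, the same call also works against the installed library rather than this repo tree:

from transformers import IBertConfig

config = IBertConfig(quant_mode=True)  # enable integer-only quantization mode
assert config.model_type == "ibert"
assert config.hidden_size == 768       # library default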
5
1
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset snake_case = random.Random() def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=1.0 , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> List[str]: if rng is None: _snake_case = global_rng _snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase ( unittest.TestCase ): def __init__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any]=7 , __lowerCamelCase : List[Any]=4_0_0 , __lowerCamelCase : Any=2_0_0_0 , __lowerCamelCase : Any=2_0_4_8 , __lowerCamelCase : Any=1_2_8 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[int]=5_1_2 , __lowerCamelCase : Tuple=3_0 , __lowerCamelCase : List[Any]=4_4_1_0_0 , ): """simple docstring""" _snake_case = parent _snake_case = batch_size _snake_case = min_seq_length _snake_case = max_seq_length _snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _snake_case = spectrogram_length _snake_case = feature_size _snake_case = num_audio_channels _snake_case = hop_length _snake_case = chunk_length _snake_case = sampling_rate def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Any=False , __lowerCamelCase : int=False ): """simple docstring""" def _flatten(__lowerCamelCase : List[str] ): return list(itertools.chain(*__lowerCamelCase ) ) if equal_length: _snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _snake_case = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _snake_case = [np.asarray(__lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : Tuple = TvltFeatureExtractor def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = TvltFeatureExtractionTester(self ) def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(__lowerCamelCase , '''spectrogram_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''feature_size''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''num_audio_channels''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''hop_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''chunk_length''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''sampling_rate''' ) ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with 
tempfile.TemporaryDirectory() as tmpdirname: _snake_case = feat_extract_first.save_pretrained(__lowerCamelCase )[0] check_json_file_has_correct_format(__lowerCamelCase ) _snake_case = self.feature_extraction_class.from_pretrained(__lowerCamelCase ) _snake_case = feat_extract_first.to_dict() _snake_case = feat_extract_second.to_dict() _snake_case = dict_first.pop('''mel_filters''' ) _snake_case = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = os.path.join(__lowerCamelCase , '''feat_extract.json''' ) feat_extract_first.to_json_file(__lowerCamelCase ) _snake_case = self.feature_extraction_class.from_json_file(__lowerCamelCase ) _snake_case = feat_extract_first.to_dict() _snake_case = feat_extract_second.to_dict() _snake_case = dict_first.pop('''mel_filters''' ) _snake_case = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" # Initialize feature_extractor _snake_case = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _snake_case = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] _snake_case = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input _snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _snake_case = feature_extractor(__lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _snake_case = feature_extractor( __lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=__lowerCamelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
_snake_case = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] _snake_case = np.asarray(__lowerCamelCase ) _snake_case = feature_extractor(__lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] ): """simple docstring""" _snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _snake_case = ds.sort('''id''' ).select(range(__lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = self._load_datasamples(1 ) _snake_case = TvltFeatureExtractor() _snake_case = feature_extractor(__lowerCamelCase , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) _snake_case = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowerCamelCase , atol=1E-4 ) )
103
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
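The design pairs a deque (recency order, O(1) operations at both ends) with a set (O(1) membership test); the deque's `remove` on a cache hit is still O(n), the usual trade-off of this simple formulation. A small eviction sketch, not in the original file:

cache: LRUCache[str] = LRUCache(2)
cache.refer("x")
cache.refer("y")
cache.refer("z")  # capacity 2, so the least recently used key "x" is evicted
assert str(cache) == "LRUCache(2) => ['z', 'y']"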
424
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
703
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu _A: Union[str, Any] = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json""" with io.open(filename, """r""", encoding="""utf-8""") as f: _A: Optional[Any] = json.load(f) @require_torch class UpperCAmelCase ( unittest.TestCase ): def __lowerCamelCase ( self , __A ): return FSMTTokenizer.from_pretrained(__A ) def __lowerCamelCase ( self , __A ): __UpperCAmelCase = FSMTForConditionalGeneration.from_pretrained(__A ).to(__A ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 2_6.0], ['ru-en', 2_2.0], ['en-de', 2_2.0], ['de-en', 2_9.0], ] ) @slow def __lowerCamelCase ( self , __A , __A ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality __UpperCAmelCase = f'facebook/wmt19-{pair}' __UpperCAmelCase = self.get_tokenizer(__A ) __UpperCAmelCase = self.get_model(__A ) __UpperCAmelCase = bleu_data[pair]['src'] __UpperCAmelCase = bleu_data[pair]['tgt'] __UpperCAmelCase = tokenizer(__A , return_tensors='pt' , truncation=__A , padding='longest' ).to(__A ) __UpperCAmelCase = model.generate( input_ids=batch.input_ids , num_beams=8 , ) __UpperCAmelCase = tokenizer.batch_decode( __A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) __UpperCAmelCase = calculate_bleu(__A , __A ) print(__A ) self.assertGreaterEqual(scores['bleu'] , __A )
617
0
'''simple docstring''' import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __a ( _snake_case, unittest.TestCase ): # TODO: is there an appropriate internal test set? __UpperCamelCase : Optional[Any] = 'ssube/stable-diffusion-x4-upscaler-onnx' def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Optional[int]=0 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 128, 128) ,rng=random.Random(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) __SCREAMING_SNAKE_CASE = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) __SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' 
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) __SCREAMING_SNAKE_CASE = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) __SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs() __SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __a ( unittest.TestCase ): @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ort.SessionOptions() __SCREAMING_SNAKE_CASE = False return options def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __SCREAMING_SNAKE_CASE = init_image.resize((128, 128) ) # using the PNDM scheduler by default __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=lowerCamelCase ,image=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase ,output_type="""np""" ,) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __SCREAMING_SNAKE_CASE = init_image.resize((128, 128) ) __SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained( 
"""ssube/stable-diffusion-x4-upscaler-onnx""" ,subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" ,scheduler=lowerCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=lowerCamelCase ,image=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=lowerCamelCase ,output_type="""np""" ,) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
109
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
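For orientation, a check that is not in the original file: with two-digit numerators and denominators this search finds the four classic "digit-cancelling" fractions, whose product in lowest terms has denominator 100.

assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
assert solution(2) == 100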
21
0
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
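A small illustration of the environment-variable check above, with hypothetical values; per the function's last step, the result is True only when the `smdistributed` package is also importable in the current environment:

import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
# Both env checks now pass; the final answer depends on `smdistributed` being installed:
print(is_sagemaker_model_parallel_available())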
717
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
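A quick check of the secant iteration above, not in the original file: the real root of x**3 - 2*x - 5 lies near 2.0946, so the residual at the returned point should be tiny.

root = intersection(f, 3, 3.5)
assert abs(root - 2.0945514815) < 1e-4  # close to the true root
assert abs(f(root)) < 1e-3              # small residual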
682
0
import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase_ ( snake_case__ , unittest.TestCase ): '''simple docstring''' UpperCamelCase__ : List[str] = XLMTokenizer UpperCamelCase__ : str = False def _A ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __SCREAMING_SNAKE_CASE = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __SCREAMING_SNAKE_CASE = dict(zip(A_ , range(len(A_ ) ) ) ) __SCREAMING_SNAKE_CASE = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(A_ ) ) def _A ( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 'lower newer' __SCREAMING_SNAKE_CASE = 'lower newer' return input_text, output_text def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = XLMTokenizer(self.vocab_file , self.merges_file ) __SCREAMING_SNAKE_CASE = 'lower' __SCREAMING_SNAKE_CASE = ['low', 'er</w>'] __SCREAMING_SNAKE_CASE = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) __SCREAMING_SNAKE_CASE = tokens + ['<unk>'] __SCREAMING_SNAKE_CASE = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) @slow def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' ) __SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=A_ ) __SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A_ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
148
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class __UpperCAmelCase ( snake_case__ ): """simple docstring""" _snake_case : int = 'dpr' def __init__( self : Tuple , A_ : List[str]=3_05_22 , A_ : Tuple=7_68 , A_ : str=12 , A_ : int=12 , A_ : Dict=30_72 , A_ : Optional[int]="gelu" , A_ : Tuple=0.1 , A_ : List[str]=0.1 , A_ : int=5_12 , A_ : List[str]=2 , A_ : int=0.02 , A_ : Tuple=1e-1_2 , A_ : List[Any]=0 , A_ : List[str]="absolute" , A_ : int = 0 , **A_ : Union[str, Any] , )-> Any: super().__init__(pad_token_id=A_ , **A_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = projection_dim __UpperCamelCase = position_embedding_type
505
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
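A short sketch with the restored class (PegasusConfig is public transformers API); note how the aliased properties defer to the underlying encoder fields:

from transformers import PegasusConfig

config = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == 512        # alias for d_model
assert config.num_attention_heads == 8  # alias for encoder_attention_heads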
720
from ..utils import DummyObject, requires_backends class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Any ) -> str: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = ["sentencepiece"] def __init__( self : Any ,*lowerCamelCase__ : str ,**lowerCamelCase__ : List[Any] ) -> str: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : str = ["sentencepiece"] def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[int] ) -> Optional[int]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = ["sentencepiece"] def __init__( self : Tuple ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Any ) -> Union[str, Any]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = ["sentencepiece"] def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> str: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[str] = ["sentencepiece"] def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ) -> Tuple: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[str] = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : str ) -> Optional[int]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = ["sentencepiece"] def __init__( self : Union[str, Any] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[Any] ) -> int: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[Any] = ["sentencepiece"] def __init__( self : List[str] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : int ) -> Tuple: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = ["sentencepiece"] def __init__( self : str ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : List[str] ) -> Optional[int]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Any = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : List[Any] ) -> List[str]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Any = ["sentencepiece"] def __init__( self : Dict ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[int] ) -> Union[str, Any]: 
'''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Tuple = ["sentencepiece"] def __init__( self : Dict ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[Any] = ["sentencepiece"] def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : int ) -> Any: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Tuple = ["sentencepiece"] def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ) -> Union[str, Any]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[Any] = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Optional[Any] ) -> Dict: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = ["sentencepiece"] def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : str ) -> List[Any]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Dict ) -> List[Any]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : int = ["sentencepiece"] def __init__( self : List[Any] ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Any ) -> int: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = ["sentencepiece"] def __init__( self : Dict ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : str ) -> Tuple: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : str = ["sentencepiece"] def __init__( self : Union[str, Any] ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Any ) -> List[str]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Any = ["sentencepiece"] def __init__( self : Dict ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> int: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : List[str] = ["sentencepiece"] def __init__( self : Optional[int] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : str = ["sentencepiece"] def __init__( self : Optional[Any] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : List[Any] ) -> Any: '''simple docstring''' requires_backends(self 
,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[Any] = ["sentencepiece"] def __init__( self : List[str] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[int] = ["sentencepiece"] def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> Dict: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Tuple = ["sentencepiece"] def __init__( self : Any ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Any ) -> Optional[int]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = ["sentencepiece"] def __init__( self : Union[str, Any] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : List[Any] ) -> str: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : str = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : str ) -> Dict: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = ["sentencepiece"] def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> List[str]: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ): '''simple docstring''' __snake_case : Union[str, Any] = ["sentencepiece"] def __init__( self : List[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ) -> Dict: '''simple docstring''' requires_backends(self ,["""sentencepiece"""] )
116
0
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version snake_case_ : Any = get_logger(__name__) class lowercase__ : lowercase__ = """dummy_data""" lowercase__ = """datasets""" lowercase__ = False def __init__( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[Version, str] ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[List[Callable]] = None ,): '''simple docstring''' _UpperCamelCase : Tuple = 0 _UpperCamelCase : Union[str, Any] = dataset_name _UpperCamelCase : str = cache_dir _UpperCamelCase : int = use_local_dummy_data _UpperCamelCase : Any = config # download_callbacks take a single url as input _UpperCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _UpperCamelCase : Optional[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _UpperCamelCase : Tuple = str(lowerCamelCase__ ) # to be downloaded _UpperCamelCase : str = None _UpperCamelCase : Any = None @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if self._dummy_file is None: _UpperCamelCase : Any = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join('dummy' ,self.version_name ) @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,'dummy_data.zip' ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' _UpperCamelCase : List[Any] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _UpperCamelCase : int = cached_path( lowerCamelCase__ ,cache_dir=self.cache_dir ,extract_compressed_file=lowerCamelCase__ ,force_extract=lowerCamelCase__ ) return os.path.join(lowerCamelCase__ ,self.dummy_file_name ) @property def UpperCamelCase_ ( self : Any ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' if self._bucket_url is None: _UpperCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,'/' ) ) return self._bucket_url @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,'/' ).split('/' )[:-1] ) def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Any ,*lowerCamelCase__ : str ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _UpperCamelCase : Optional[Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _UpperCamelCase : Tuple = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): return self.create_dummy_data_dict(lowerCamelCase__ ,lowerCamelCase__ ) elif isinstance(lowerCamelCase__ ,(list, tuple) ): return self.create_dummy_data_list(lowerCamelCase__ ,lowerCamelCase__ ) else: return self.create_dummy_data_single(lowerCamelCase__ ,lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : int ,*lowerCamelCase__ : Tuple ): '''simple docstring''' return self.download_and_extract(lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : int ): '''simple docstring''' return self.download_and_extract(lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : int ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ): '''simple docstring''' return path def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' return {} def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ): '''simple docstring''' _UpperCamelCase : List[Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): for single_url in single_urls: download_callback(lowerCamelCase__ ) else: _UpperCamelCase : Tuple = single_urls download_callback(lowerCamelCase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Dict = [os.path.join(lowerCamelCase__ ,urllib.parse.quote_plus(Path(lowerCamelCase__ ).name ) ) for x in single_urls] else: _UpperCamelCase : str = single_urls _UpperCamelCase : List[Any] = os.path.join(lowerCamelCase__ ,urllib.parse.quote_plus(Path(lowerCamelCase__ ).name ) ) _UpperCamelCase : int = value # make sure that values are unique if all(isinstance(lowerCamelCase__ ,lowerCamelCase__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _UpperCamelCase : Optional[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str ): '''simple docstring''' _UpperCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _UpperCamelCase : str = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' ,lowerCamelCase__ ) ) for url in data_url ) _UpperCamelCase : Union[str, Any] = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _UpperCamelCase : Optional[Any] = [data_url[0]] * len(lowerCamelCase__ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCamelCase__ ) # we force the name of each key to be the last file / folder name of the url 
path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _UpperCamelCase : Dict = os.path.join(lowerCamelCase__ ,urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(lowerCamelCase__ ) return dummy_data_list def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(lowerCamelCase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _UpperCamelCase : List[Any] = os.path.join(lowerCamelCase__ ,urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(lowerCamelCase__ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' pass def UpperCamelCase_ ( self : int ): '''simple docstring''' pass def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ): '''simple docstring''' def _iter_archive_members(lowerCamelCase__ : Optional[Any] ): # this preserves the order of the members inside the ZIP archive _UpperCamelCase : List[Any] = Path(self.dummy_file ).parent _UpperCamelCase : Any = path.relative_to(lowerCamelCase__ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _UpperCamelCase : int = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowerCamelCase__ ) _UpperCamelCase : Dict = Path(lowerCamelCase__ ) _UpperCamelCase : List[str] = _iter_archive_members(lowerCamelCase__ ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(lowerCamelCase__ ).as_posix(), file_path.open('rb' ) def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : Union[str, Any] ): '''simple docstring''' if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : str = [paths] for path in paths: if os.path.isfile(lowerCamelCase__ ): if os.path.basename(lowerCamelCase__ ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCamelCase__ ): if os.path.basename(lowerCamelCase__ ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(lowerCamelCase__ ): if filename.startswith(('.', '__') ): continue yield os.path.join(lowerCamelCase__ ,lowerCamelCase__ )
195
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar snake_case_ : Optional[Any] = TypeVar('KEY') snake_case_ : Dict = TypeVar('VAL') @dataclass(frozen=lowercase , slots=lowercase ) class lowercase__ ( Generic[KEY, VAL] ): lowercase__ = 42 lowercase__ = 42 class lowercase__ ( _Item ): def __init__( self : Union[str, Any] ): '''simple docstring''' super().__init__(lowerCamelCase__ ,lowerCamelCase__ ) def __bool__( self : Tuple ): '''simple docstring''' return False snake_case_ : int = _DeletedItem() class lowercase__ ( MutableMapping[KEY, VAL] ): def __init__( self : Any ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : float = 0.7_5 ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = initial_block_size _UpperCamelCase : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _UpperCamelCase : Optional[int] = capacity_factor _UpperCamelCase : List[Any] = 0 def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : KEY ): '''simple docstring''' return hash(lowerCamelCase__ ) % len(self._buckets ) def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : int ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : KEY ,lowerCamelCase__ : VAL ): '''simple docstring''' _UpperCamelCase : str = self._buckets[ind] if not stored: _UpperCamelCase : str = _Item(lowerCamelCase__ ,lowerCamelCase__ ) self._len += 1 return True elif stored.key == key: _UpperCamelCase : Union[str, Any] = _Item(lowerCamelCase__ ,lowerCamelCase__ ) return True else: return False def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _UpperCamelCase : Dict = len(self._buckets ) * self._capacity_factor return len(self ) >= int(lowerCamelCase__ ) def UpperCamelCase_ ( self : Dict ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False _UpperCamelCase : str = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def UpperCamelCase_ ( self : int ,lowerCamelCase__ : int ): '''simple docstring''' _UpperCamelCase : int = self._buckets _UpperCamelCase : Dict = [None] * new_size _UpperCamelCase : str = 0 for item in old_buckets: if item: self._add_item(item.key ,item.val ) def UpperCamelCase_ ( self : int ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : KEY ): '''simple docstring''' _UpperCamelCase : List[str] = self._get_bucket_index(lowerCamelCase__ ) for _ in range(len(self._buckets ) ): yield ind _UpperCamelCase : Optional[int] = self._get_next_ind(lowerCamelCase__ ) def UpperCamelCase_ ( self : str ,lowerCamelCase__ : KEY ,lowerCamelCase__ : VAL ): '''simple docstring''' for ind in self._iterate_buckets(lowerCamelCase__ ): if self._try_set(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ): break def __setitem__( self : Dict ,lowerCamelCase__ : KEY ,lowerCamelCase__ : VAL ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(lowerCamelCase__ ,lowerCamelCase__ ) def __delitem__( self : Optional[Any] ,lowerCamelCase__ : KEY ): '''simple docstring''' for ind in self._iterate_buckets(lowerCamelCase__ ): _UpperCamelCase : Any = self._buckets[ind] if item is None: raise KeyError(lowerCamelCase__ ) if item is _deleted: continue if 
item.key == key: _UpperCamelCase : Optional[Any] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Dict ,lowerCamelCase__ : KEY ): '''simple docstring''' for ind in self._iterate_buckets(lowerCamelCase__ ): _UpperCamelCase : Any = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(lowerCamelCase__ ) def __len__( self : Dict ): '''simple docstring''' return self._len def __iter__( self : List[str] ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self : Union[str, Any] ): '''simple docstring''' _UpperCamelCase : Tuple = ' ,'.join( F'{item.key}: {item.val}' for item in self._buckets if item ) return F'HashMap({val_string})'
195
1
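The style_context row above implements an open-addressing hash map with linear probing and a tombstone sentinel so deletions do not break probe chains. The sketch below is a simplified reconstruction of that scheme, not the row's exact code: ProbingMap and _DELETED are illustrative names, resizing is omitted, and buckets hold plain (key, value) tuples.

_DELETED = object()  # tombstone: keeps probe chains intact after a deletion


class ProbingMap:
    def __init__(self, size: int = 8) -> None:
        self._buckets: list = [None] * size

    def _probe(self, key):
        """Yield every bucket index starting from hash(key), wrapping around once."""
        ind = hash(key) % len(self._buckets)
        for _ in range(len(self._buckets)):
            yield ind
            ind = (ind + 1) % len(self._buckets)

    def __setitem__(self, key, val) -> None:
        first_free = None  # first tombstone seen while probing
        for ind in self._probe(key):
            item = self._buckets[ind]
            if item is None:  # key cannot be present past this point
                slot = first_free if first_free is not None else ind
                self._buckets[slot] = (key, val)
                return
            if item is _DELETED:
                if first_free is None:
                    first_free = ind
            elif item[0] == key:  # update in place
                self._buckets[ind] = (key, val)
                return
        if first_free is not None:
            self._buckets[first_free] = (key, val)
            return
        raise RuntimeError("map is full")  # the row above resizes instead

    def __getitem__(self, key):
        for ind in self._probe(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is not _DELETED and item[0] == key:
                return item[1]
        raise KeyError(key)

    def __delitem__(self, key) -> None:
        for ind in self._probe(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is not _DELETED and item[0] == key:
                self._buckets[ind] = _DELETED
                return
        raise KeyError(key)


m = ProbingMap()
m["a"] = 1
m["a"] = 2  # update, not duplicate
del m["a"]  # leaves a tombstone, so later probes still work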
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker A__ : int ='CompVis/stable-diffusion-v1-1' A__ : Tuple ='CompVis/stable-diffusion-v1-2' A__ : Optional[Any] ='CompVis/stable-diffusion-v1-3' A__ : Optional[Any] ='CompVis/stable-diffusion-v1-4' class __A ( _SCREAMING_SNAKE_CASE ): def __init__( self : List[str] , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , lowerCamelCase : bool = True , ): """simple docstring""" super()._init_() __A : List[Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase ) __A : Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase ) __A : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase ) __A : List[Any] = StableDiffusionPipeline( vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , requires_safety_checker=lowerCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def lowercase_( self : Tuple ): """simple docstring""" return {k: getattr(self , lowerCamelCase ) for k in self.config.keys() if not k.startswith("""_""" )} def lowercase_( self : List[str] , lowerCamelCase : Optional[Union[str, int]] = "auto" ): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __A : List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCamelCase ) def lowercase_( self : Tuple ): """simple docstring""" self.enable_attention_slicing(lowerCamelCase ) @torch.no_grad() def lowercase_( self : Optional[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Union[str, Any] , ): """simple docstring""" return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def lowercase_( self : Any , lowerCamelCase 
: Union[str, List[str]] , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : str , ): """simple docstring""" return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def lowercase_( self : Any , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Optional[int] , ): """simple docstring""" return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def lowercase_( self : List[str] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Any , ): """simple docstring""" return self.pipea( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) @torch.no_grad() def lowercase_( self : Dict , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 5_12 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , 
lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : str , ): """simple docstring""" __A : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(lowerCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." ) # Get first result from Stable Diffusion Checkpoint v1.1 __A : List[Any] = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 __A : Optional[int] = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 __A : Dict = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 __A : Dict = self.textaimg_sda_a( prompt=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , **lowerCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
499
'''simple docstring''' def A_ ( __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __A : Tuple = generate_pascal_triangle(__SCREAMING_SNAKE_CASE ) for row_idx in range(__SCREAMING_SNAKE_CASE ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def A_ ( __SCREAMING_SNAKE_CASE : int ) -> list[list[int]]: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) __A : list[list[int]] = [] for current_row_idx in range(__SCREAMING_SNAKE_CASE ): __A : Tuple = populate_current_row(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) triangle.append(__SCREAMING_SNAKE_CASE ) return triangle def A_ ( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int ) -> list[int]: """simple docstring""" __A : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 __A , __A : Tuple = 1, 1 for current_col_idx in range(1 , __SCREAMING_SNAKE_CASE ): calculate_current_element( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return current_row def A_ ( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , ) -> None: """simple docstring""" __A : str = triangle[current_row_idx - 1][current_col_idx - 1] __A : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx] __A : Any = above_to_left_elt + above_to_right_elt def A_ ( __SCREAMING_SNAKE_CASE : int ) -> list[list[int]]: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) __A : list[list[int]] = [[1]] for row_index in range(1 , __SCREAMING_SNAKE_CASE ): __A : Optional[Any] = [0] + result[-1] + [0] __A : Optional[Any] = row_index + 1 # Calculate the number of distinct elements in a row __A : int = sum(divmod(__SCREAMING_SNAKE_CASE , 2 ) ) __A : str = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] __A : Tuple = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() __A : Any = row_first_half + row_second_half result.append(__SCREAMING_SNAKE_CASE ) return result def A_ ( ) -> None: """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None: __A : List[Any] = F"{func.__name__}({value})" __A : Dict = timeit(F"__main__.{call}" , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
499
1
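The style_context row above generates Pascal's triangle two ways (a cell-by-cell build and an "optimized" symmetric build). The core recurrence, each inner entry being the sum of the two entries directly above it, reduces to the short sketch below; pascal_triangle is an illustrative name, not the row's function.

def pascal_triangle(num_rows: int) -> list[list[int]]:
    triangle: list[list[int]] = []
    for row_idx in range(num_rows):
        row = [1] * (row_idx + 1)  # first and last entries are always 1
        for col in range(1, row_idx):
            row[col] = triangle[row_idx - 1][col - 1] + triangle[row_idx - 1][col]
        triangle.append(row)
    return triangle


assert pascal_triangle(5) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]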
"""simple docstring""" from __future__ import annotations lowerCamelCase__ = 10 def _SCREAMING_SNAKE_CASE ( UpperCamelCase : list[int] ): A__ = 1 A__ = max(UpperCamelCase ) while placement <= max_digit: # declare and initialize empty buckets A__ = [[] for _ in range(UpperCamelCase )] # split list_of_ints between the buckets for i in list_of_ints: A__ = int((i / placement) % RADIX ) buckets[tmp].append(UpperCamelCase ) # put each buckets' contents into list_of_ints A__ = 0 for b in range(UpperCamelCase ): for i in buckets[b]: A__ = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
574
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _UpperCamelCase ( unittest.TestCase): def __init__(self , lowerCamelCase__ , lowerCamelCase__=1_0_0 , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , ): """simple docstring""" A__ = parent A__ = vocab_size A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def A (self ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, pixel_values, labels def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A__ = FlaxBeitModel(config=lowerCamelCase__ ) A__ = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A__ = FlaxBeitForMaskedImageModeling(config=lowerCamelCase__ ) A__ = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A__ = self.type_sequence_label_size A__ = FlaxBeitForImageClassification(config=lowerCamelCase__ ) A__ = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = FlaxBeitForImageClassification(lowerCamelCase__ ) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(lowerCamelCase__ ) def A (self ): 
"""simple docstring""" A__ = self.prepare_config_and_inputs() ( ( A__ ) ,( A__ ) ,( A__ ) , ) = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class _UpperCamelCase ( __snake_case , unittest.TestCase): __lowerCamelCase = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def A (self ): """simple docstring""" A__ = FlaxBeitModelTester(self ) A__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 ) def A (self ): """simple docstring""" self.config_tester.run_common_tests() def A (self ): """simple docstring""" A__ ,A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowerCamelCase__ ) A__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def A (self ): """simple docstring""" A__ ,A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) A__ = model_class(lowerCamelCase__ ) @jax.jit def model_jitted(lowerCamelCase__ , **lowerCamelCase__ ): return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ ) with self.subTest("""JIT Enabled""" ): A__ = model_jitted(**lowerCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A__ = model_jitted(**lowerCamelCase__ ).to_tuple() self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def A (self ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def A (self ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def A (self ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @slow def A (self ): """simple docstring""" for model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" ) A__ = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(lowerCamelCase__ ) def _SCREAMING_SNAKE_CASE ( ): A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @require_flax class _UpperCamelCase ( unittest.TestCase): @cached_property def A (self ): """simple docstring""" return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def A (self ): """simple docstring""" A__ = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowerCamelCase__ , return_tensors="""np""" ).pixel_values # prepare bool_masked_pos A__ = np.ones((1, 1_9_6) , dtype=lowerCamelCase__ ) # forward pass A__ = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) A__ = outputs.logits # verify the logits A__ = (1, 1_9_6, 8_1_9_2) 
self.assertEqual(logits.shape , lowerCamelCase__ ) A__ = np.array( [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def A (self ): """simple docstring""" A__ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowerCamelCase__ , return_tensors="""np""" ) # forward pass A__ = model(**lowerCamelCase__ ) A__ = outputs.logits # verify the logits A__ = (1, 1_0_0_0) self.assertEqual(logits.shape , lowerCamelCase__ ) A__ = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ) self.assertTrue(np.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) A__ = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def A (self ): """simple docstring""" A__ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowerCamelCase__ , return_tensors="""np""" ) # forward pass A__ = model(**lowerCamelCase__ ) A__ = outputs.logits # verify the logits A__ = (1, 2_1_8_4_1) self.assertEqual(logits.shape , lowerCamelCase__ ) A__ = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ) self.assertTrue(np.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) A__ = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
574
1
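The code row above is LSD (least-significant-digit) radix sort, but every assignment target has been renamed to the same placeholder, which makes the logic hard to follow. Below is a readable reconstruction inferred from the surviving references (placement, max_digit, buckets, tmp, a); it uses integer division where the row uses int() on a float division, which is equivalent for non-negative integers.

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = (i // placement) % RADIX
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints, in order
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]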
"""simple docstring""" import argparse from collections import defaultdict import yaml lowerCAmelCase__ = 'docs/source/en/_toctree.yml' def _lowerCamelCase ( __a ): SCREAMING_SNAKE_CASE_ = defaultdict(__a ) for doc in model_doc: counts[doc["local"]] += 1 SCREAMING_SNAKE_CASE_ = [key for key, value in counts.items() if value > 1] SCREAMING_SNAKE_CASE_ = [] for duplicate_key in duplicates: SCREAMING_SNAKE_CASE_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(__a ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(__a, key=lambda __a : s["title"].lower() ) def _lowerCamelCase ( __a=False ): with open(__a, encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE_ = yaml.safe_load(f.read() ) # Get to the API doc SCREAMING_SNAKE_CASE_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 SCREAMING_SNAKE_CASE_ = content[api_idx]['''sections'''] # Then to the model doc SCREAMING_SNAKE_CASE_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 SCREAMING_SNAKE_CASE_ = api_doc[model_idx]['''sections'''] SCREAMING_SNAKE_CASE_ = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section] SCREAMING_SNAKE_CASE_ = False for idx, modality_doc in modalities_docs: SCREAMING_SNAKE_CASE_ = modality_doc['''sections'''] SCREAMING_SNAKE_CASE_ = clean_model_doc_toc(__a ) if old_modality_doc != new_modality_doc: SCREAMING_SNAKE_CASE_ = True if overwrite: SCREAMING_SNAKE_CASE_ = new_modality_doc if diff: if overwrite: SCREAMING_SNAKE_CASE_ = model_doc SCREAMING_SNAKE_CASE_ = api_doc with open(__a, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(__a, allow_unicode=__a ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowerCAmelCase__ = parser.parse_args() check_model_doc(args.fix_and_overwrite)
628
"""simple docstring""" def _lowerCamelCase ( __a ): if not isinstance(__a, __a ): SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer' raise TypeError(__a ) if number < 1: SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0' raise ValueError(__a ) SCREAMING_SNAKE_CASE_ = 1 for i in range(1, __a ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
628
1
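The style_context row above computes Catalan numbers through the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1); with the row's 1-based argument, the function returns C(number - 1). A commented sketch with a quick sanity check (catalan is an illustrative name):

def catalan(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    current_number = 1  # C(0)
    for i in range(1, number):
        current_number *= 4 * i - 2  # multiply by (4i - 2)
        current_number //= i + 1     # divide by (i + 1); always exact, since C(i) is an integer
    return current_number


# C(0)..C(5) = 1, 1, 2, 5, 14, 42
assert [catalan(k) for k in range(1, 7)] == [1, 1, 2, 5, 14, 42]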
'''simple docstring''' from abc import ABC, abstractmethod from argparse import ArgumentParser class snake_case ( __lowerCamelCase ): """simple docstring""" @staticmethod @abstractmethod def _lowerCamelCase ( __A : ArgumentParser ): raise NotImplementedError() @abstractmethod def _lowerCamelCase ( self : Optional[Any] ): raise NotImplementedError()
399
'''simple docstring''' import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def lowercase__ ( __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict=None , __lowercase : Tuple=None , __lowercase : Optional[int]=None , __lowercase : Tuple=None , __lowercase : int=None , ) -> Dict: """simple docstring""" if attention_mask is None: __UpperCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __UpperCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __UpperCamelCase = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__lowercase ) if decoder_head_mask is None: __UpperCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowercase ) if cross_attn_head_mask is None: __UpperCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowercase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : Any , __A : Optional[int]=1_3 , __A : Dict=7 , __A : Union[str, Any]=True , __A : Optional[Any]=False , __A : List[Any]=9_9 , __A : str=1_6 , __A : str=2 , __A : List[str]=4 , __A : Optional[Any]=4 , __A : List[Any]="relu" , __A : List[str]=0.1 , __A : Union[str, Any]=0.1 , __A : Dict=0.0 , __A : Tuple=0.0 , __A : str=2_0 , __A : Dict=2 , __A : Dict=1 , __A : Any=0 , ): __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def _lowerCamelCase ( self : str ): __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.eos_token_id # Eos Token __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids 
had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __UpperCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __UpperCamelCase = self.get_config() __UpperCamelCase = prepare_mam_aaa_inputs_dict(__A , __A , __A ) return config, inputs_dict def _lowerCamelCase ( self : Tuple ): return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def _lowerCamelCase ( self : int ): __UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def _lowerCamelCase ( self : Any , __A : str , __A : int ): __UpperCamelCase = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval() __UpperCamelCase = inputs_dict['input_ids'] __UpperCamelCase = inputs_dict['attention_mask'] __UpperCamelCase = inputs_dict['head_mask'] # first forward pass __UpperCamelCase = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __UpperCamelCase = model(__A , attention_mask=__A )['last_hidden_state'] __UpperCamelCase = model(__A , attention_mask=__A , past_key_values=__A )[ 'last_hidden_state' ] # select random slice __UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) ) def _lowerCamelCase ( self : List[Any] , __A : Tuple , __A : str ): __UpperCamelCase = MaMaaaModel(config=__A ).to(__A ).eval() __UpperCamelCase = model(**__A ) __UpperCamelCase = outputs.encoder_last_hidden_state __UpperCamelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase = model.get_encoder() encoder.save_pretrained(__A ) __UpperCamelCase = MaMaaaEncoder.from_pretrained(__A ).to(__A ) __UpperCamelCase = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: __UpperCamelCase = model.get_decoder() decoder.save_pretrained(__A ) __UpperCamelCase = MaMaaaDecoder.from_pretrained(__A ).to(__A ) __UpperCamelCase = decoder( input_ids=inputs_dict['decoder_input_ids'] 
, attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict['attention_mask'] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =(MaMaaaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ : Optional[Any] =( { "conversational": MaMaaaForConditionalGeneration, "feature-extraction": MaMaaaModel, "summarization": MaMaaaForConditionalGeneration, "text2text-generation": MaMaaaForConditionalGeneration, "translation": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : str =True SCREAMING_SNAKE_CASE_ : Optional[int] =True SCREAMING_SNAKE_CASE_ : str =False SCREAMING_SNAKE_CASE_ : str =False def _lowerCamelCase ( self : Tuple , __A : Tuple , __A : List[str] , __A : Tuple , __A : Dict , __A : List[str] ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = MaMaaaModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__A ) def _lowerCamelCase ( self : List[Any] ): self.config_tester.run_common_tests() def _lowerCamelCase ( self : Tuple ): __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__A ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A ) __UpperCamelCase , __UpperCamelCase = model_class.from_pretrained(__A , output_loading_info=__A ) self.assertEqual(info['missing_keys'] , [] ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__A ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): __UpperCamelCase = model_class(__A ) model.to(__A ) model.eval() __UpperCamelCase = copy.deepcopy(self._prepare_for_class(__A , __A ) ) if not self.is_encoder_decoder: __UpperCamelCase = inputs['input_ids'] del inputs["input_ids"] else: __UpperCamelCase = inputs['input_ids'] __UpperCamelCase = inputs.get('decoder_input_ids' , __A ) del inputs["input_ids"] inputs.pop('decoder_input_ids' , __A ) __UpperCamelCase = model.get_input_embeddings() if not self.is_encoder_decoder: __UpperCamelCase = wte(__A ) else: __UpperCamelCase = wte(__A ) __UpperCamelCase = wte(__A ) with torch.no_grad(): model(**__A )[0] def _lowerCamelCase ( self : List[str] ): __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs() __UpperCamelCase = input_dict['input_ids'] __UpperCamelCase = input_ids.ne(1 ).to(__A ) __UpperCamelCase = MaMaaaForConditionalGeneration(__A ).eval().to(__A ) if torch_device == 
"cuda": model.half() model.generate(__A , attention_mask=__A ) model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 ) def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" return torch.tensor(__lowercase , dtype=torch.long , device=__lowercase ) a__ : str =1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCamelCase ( self : Optional[Any] ): return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ) def _lowerCamelCase ( self : str ): __UpperCamelCase = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(__A ) __UpperCamelCase = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) __UpperCamelCase = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) __UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): __UpperCamelCase = model(**__A )[0] __UpperCamelCase = torch.Size((1, 1_1, 1_0_2_4) ) self.assertEqual(output.shape , __A ) # change to expected output here __UpperCamelCase = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__A ) # change to intended input __UpperCamelCase = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) __UpperCamelCase = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) __UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): __UpperCamelCase = model(**__A )[0] __UpperCamelCase = torch.Size((1, 1_1, model.config.vocab_size) ) self.assertEqual(output.shape , __A ) # change to expected output here __UpperCamelCase = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__A ) __UpperCamelCase = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' ) __UpperCamelCase = [ 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent' ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de' ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.', ] # The below article tests that we don't add any hypotheses outside of the top n_beams __UpperCamelCase = tokenizer(__A , padding=__A , return_tensors='pt' ) __UpperCamelCase = model.generate( input_ids=dct['input_ids'].to(__A ) , attention_mask=dct['attention_mask'].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , ) __UpperCamelCase = [ 'The NSA case highlights the total absence of intelligence debate', 'I think there are two levels of response from the French government.', 'When François Hollande 
calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.' ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all' ' communications in France.', ] __UpperCamelCase = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A ) assert generated == expected_en
399
1
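The code row above is an abstract CLI-command base class in the transformers style: a static register_subcommand hook plus an abstract run(). The sketch below shows one plausible way such a base class gets wired into argparse; BaseCommand and EchoCommand are invented for illustration and are not transformers APIs, and the exact wiring in the library may differ.

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers) -> None:
        """Attach this command's subparser and arguments."""
        raise NotImplementedError()

    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError()


class EchoCommand(BaseCommand):
    def __init__(self, text: str) -> None:
        self.text = text

    @staticmethod
    def register_subcommand(subparsers) -> None:
        sub = subparsers.add_parser("echo")
        sub.add_argument("text")
        # factory: build the command object from the parsed namespace
        sub.set_defaults(func=lambda args: EchoCommand(args.text))

    def run(self) -> None:
        print(self.text)


if __name__ == "__main__":
    parser = ArgumentParser("demo")
    subparsers = parser.add_subparsers(help="demo subcommands")
    EchoCommand.register_subcommand(subparsers)
    args = parser.parse_args()
    if hasattr(args, "func"):
        args.func(args).run()
    else:
        parser.print_help()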
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
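# A condensed sketch of the registration pattern the tests above exercise; it is
# not part of the test suite, and `MyConfig`/`MyTokenizer` are hypothetical
# placeholders rather than real classes:
#
#   from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   class MyTokenizer(PreTrainedTokenizer):
#       ...
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
#   # AutoTokenizer.from_pretrained(...) now resolves "my-model" checkpoints to MyTokenizer.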
703
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax checkpoint (.pkl) file."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
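# A hypothetical command line for the script above (all paths are placeholders;
# the script name is an assumption, not taken from the original document):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin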
441
0
"""TrOCR model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
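# A minimal sketch (not part of the original file) showing the effect of the
# `attribute_map` defined above: generic config attribute names resolve to the
# decoder-specific TrOCR names. Assumes the `transformers` package is installed;
# the values below are arbitrary.
if __name__ == "__main__":
    from transformers import TrOCRConfig

    config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
    assert config.hidden_size == 512  # routed to d_model via attribute_map
    assert config.num_hidden_layers == 6  # routed to decoder_layers
    assert config.num_attention_heads == 8  # routed to decoder_attention_heads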
8
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""): raise Exception("""requires fairseq >= 1.0.0a""") logging.set_verbosity_info() __lowercase : Union[str, Any] = logging.get_logger(__name__) __lowercase : List[str] = """Hello world! cécé herlolip""" def lowerCamelCase_ ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : bool ): lowerCamelCase_ = FairseqRobertaModel.from_pretrained(_lowerCamelCase ) roberta.eval() # disable dropout lowerCamelCase_ = roberta.model.encoder.sentence_encoder lowerCamelCase_ = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: lowerCamelCase_ = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , _lowerCamelCase ) lowerCamelCase_ = XLMRobertaXLForSequenceClassification(_lowerCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCamelCase ) model.eval() # Now let's copy all the weights. # Embeddings lowerCamelCase_ = roberta_sent_encoder.embed_tokens.weight lowerCamelCase_ = roberta_sent_encoder.embed_positions.weight lowerCamelCase_ = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
lowerCamelCase_ = roberta_sent_encoder.layer_norm.weight lowerCamelCase_ = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowerCamelCase_ = model.roberta.encoder.layer[i] lowerCamelCase_ = roberta_sent_encoder.layers[i] lowerCamelCase_ = layer.attention lowerCamelCase_ = roberta_layer.self_attn_layer_norm.weight lowerCamelCase_ = roberta_layer.self_attn_layer_norm.bias # self attention lowerCamelCase_ = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) lowerCamelCase_ = roberta_layer.self_attn.q_proj.weight lowerCamelCase_ = roberta_layer.self_attn.q_proj.bias lowerCamelCase_ = roberta_layer.self_attn.k_proj.weight lowerCamelCase_ = roberta_layer.self_attn.k_proj.bias lowerCamelCase_ = roberta_layer.self_attn.v_proj.weight lowerCamelCase_ = roberta_layer.self_attn.v_proj.bias # self-attention output lowerCamelCase_ = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape lowerCamelCase_ = roberta_layer.self_attn.out_proj.weight lowerCamelCase_ = roberta_layer.self_attn.out_proj.bias # this one is final layer norm lowerCamelCase_ = roberta_layer.final_layer_norm.weight lowerCamelCase_ = roberta_layer.final_layer_norm.bias # intermediate lowerCamelCase_ = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape lowerCamelCase_ = roberta_layer.fca.weight lowerCamelCase_ = roberta_layer.fca.bias # output lowerCamelCase_ = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape lowerCamelCase_ = roberta_layer.fca.weight lowerCamelCase_ = roberta_layer.fca.bias # end of layer if classification_head: lowerCamelCase_ = roberta.model.classification_heads['''mnli'''].dense.weight lowerCamelCase_ = roberta.model.classification_heads['''mnli'''].dense.bias lowerCamelCase_ = roberta.model.classification_heads['''mnli'''].out_proj.weight lowerCamelCase_ = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowerCamelCase_ = roberta.model.encoder.lm_head.dense.weight lowerCamelCase_ = roberta.model.encoder.lm_head.dense.bias lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.weight lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.bias lowerCamelCase_ = roberta.model.encoder.lm_head.weight lowerCamelCase_ = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
lowerCamelCase_ = roberta.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1 lowerCamelCase_ = model(_lowerCamelCase )[0] if classification_head: lowerCamelCase_ = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_lowerCamelCase ) ) else: lowerCamelCase_ = roberta.model(_lowerCamelCase )[0] print(our_output.shape , their_output.shape ) lowerCamelCase_ = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 lowerCamelCase_ = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __lowercase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) __lowercase : Dict = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
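# A hypothetical invocation of the conversion script above (paths and script
# name are placeholders, not taken from the original document):
#
#   python convert_xlm_roberta_xl_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl \
#       --classification_head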
142
0
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
710
def text_justification(word: str, max_width: int) -> list:
    """
    Format the given string so that each line has exactly `max_width` characters
    and is fully (left and right) justified, and return the list of justified lines.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
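# A worked trace of the function above (a sketch added for illustration; the
# expected output matches the doctest and was computed by hand with max_width=16):
#
#   text_justification("This is an example of text justification.", 16)
#   line 1: "This is an"      (width 8)  -> 8 extra spaces split 4/4        -> 'This    is    an'
#   line 2: "example of text" (width 13) -> 3 extra spaces split 2/1 (round robin) -> 'example  of text'
#   line 3: "justification."  (width 14) -> last line left-justified, padded -> 'justification.  '
#
#   result: ['This    is    an', 'example  of text', 'justification.  ']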
70
0