from transformers import PretrainedConfig


class MixtureOfRecursionsConfig(PretrainedConfig):
    """
    Configuration class for the MixtureOfRecursions model.

    This class stores the configuration of a MixtureOfRecursions model with
    recursive transformer layers.
    """

    model_type = "mixture_of_recursions"

    def __init__(
        self,
        vocab_size=10000,
        hidden_size=256,
        num_hidden_layers=4,
        num_attention_heads=8,
        intermediate_size=1024,
        max_position_embeddings=512,
        max_recursion_depth=3,
        attention_dropout=0.1,
        hidden_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """
        Args:
            vocab_size (int): Vocabulary size of the model.
            hidden_size (int): Dimension of the hidden representations.
            num_hidden_layers (int): Number of transformer layers.
            num_attention_heads (int): Number of attention heads.
            intermediate_size (int): Dimension of the feed-forward network.
            max_position_embeddings (int): Maximum sequence length.
            max_recursion_depth (int): Maximum depth of recursive processing.
            attention_dropout (float): Dropout probability for attention layers.
            hidden_dropout (float): Dropout probability for hidden layers.
            initializer_range (float): Standard deviation for weight initialization.
            layer_norm_eps (float): Epsilon for layer normalization.
            use_cache (bool): Whether to use past key values for faster generation.
            pad_token_id (int): Token ID used for padding.
            bos_token_id (int): Token ID for beginning of sequence.
            eos_token_id (int): Token ID for end of sequence.
            tie_word_embeddings (bool): Whether to tie input and output embeddings.
        """
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.max_position_embeddings = max_position_embeddings
        self.max_recursion_depth = max_recursion_depth
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
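

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: it shows how this
    # config can be instantiated with overrides and round-tripped through the
    # serialization helpers that PretrainedConfig provides out of the box
    # (to_dict / from_dict). The chosen override values are illustrative only.
    config = MixtureOfRecursionsConfig(hidden_size=512, max_recursion_depth=4)
    print(config.max_recursion_depth)  # -> 4

    # Round-trip through a plain dict, as save_pretrained/from_pretrained do
    # via config.json; recursion-specific fields survive the round trip.
    restored = MixtureOfRecursionsConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 512
    assert restored.max_recursion_depth == 4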