# modeling_mixture_of_recursions.py
# Create this file in your repository root
"""Transformers-compatible wrappers for the Mixture-of-Recursions model.

Exposes a ``PreTrainedModel`` base, a backbone wrapper, and a causal-LM head
so the model can be loaded/saved/generated with the Hugging Face API.
NOTE(review): the backbone forward is still a placeholder — it must be wired
to the actual architecture in ``model_slm.py`` before use.
"""
import torch
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
from typing import Optional, Tuple

# Import your existing model definitions (used when this file ships with the repo).
try:
    from model_slm import *  # noqa: F401,F403 — import everything from your existing model file
except ImportError:
    # Narrowed from a bare `except:` so real errors (SyntaxError inside
    # model_slm, KeyboardInterrupt, ...) are not silently swallowed.
    pass  # Will work when uploaded to HF

from .configuration_mixture_of_recursions import MixtureOfRecursionsConfig


class MixtureOfRecursionsPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.
    """

    config_class = MixtureOfRecursionsConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = False
    _no_split_modules = []

    def _init_weights(self, module):
        """Initialize the weights of a single submodule (HF convention).

        Linear/Embedding weights are drawn from N(0, initializer_range);
        biases and padding embeddings are zeroed; LayerNorm is set to identity.
        """
        if isinstance(module, torch.nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, torch.nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, torch.nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class MixtureOfRecursionsModel(MixtureOfRecursionsPreTrainedModel):
    """
    Wrapper around your existing model to make it compatible with Transformers.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        # This should match your actual model initialization from model_slm.py
        # Replace this with your actual model class name
        # For example: self.model = YourModelClass(config)

        # Placeholder - update with your actual model architecture
        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size

        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Backbone forward pass — NOT YET IMPLEMENTED.

        Raises:
            NotImplementedError: always, until the forward logic from
                ``model_slm.py`` is ported here. Previously this silently
                returned ``None``, which made the causal-LM head fail later
                with a confusing ``TypeError`` on ``outputs[0]``.
        """
        raise NotImplementedError(
            "MixtureOfRecursionsModel.forward is a placeholder: port the "
            "forward implementation from model_slm.py before using this model."
        )


class MixtureOfRecursionsForCausalLM(MixtureOfRecursionsPreTrainedModel):
    """
    Causal LM head wrapper for your model.

    Adds a tied/untied ``lm_head`` projection on top of the backbone and
    computes the standard shifted cross-entropy loss when ``labels`` is given.
    """

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = MixtureOfRecursionsModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        # Backbone is still a placeholder, so embed_tokens may not exist yet.
        return self.model.embed_tokens if hasattr(self.model, 'embed_tokens') else None

    def set_input_embeddings(self, value):
        if hasattr(self.model, 'embed_tokens'):
            self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Run the backbone, project to vocabulary logits, optionally compute loss.

        Args:
            labels: if provided, next-token cross-entropy is computed with the
                usual one-position shift (predict token t+1 from position t).

        Returns:
            ``CausalLMOutputWithPast`` when ``return_dict`` is true, else a
            tuple ``(loss?, logits, *backbone_extras)``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Forward pass through model
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0] if isinstance(outputs, tuple) else outputs.last_hidden_state
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict token n (causal LM objective).
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = torch.nn.CrossEntropyLoss()
            # Move labels to the logits' device: required for model-parallel /
            # offloaded setups where labels may arrive on a different device.
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values if hasattr(outputs, 'past_key_values') else None,
            hidden_states=outputs.hidden_states if hasattr(outputs, 'hidden_states') else None,
            attentions=outputs.attentions if hasattr(outputs, 'attentions') else None,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Assemble model inputs for one decoding step (HF generate() hook).

        With a KV cache present only the last token is fed; position_ids are
        derived from the attention mask so left-padding is handled correctly.
        """
        if past_key_values:
            # Cache already holds earlier positions; only the new token is needed.
            input_ids = input_ids[:, -1:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # cumsum over the mask gives 0-based positions of real tokens;
            # padded slots get a harmless placeholder position of 1.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # inputs_embeds is only usable for the first step (no cache yet);
        # afterwards generation proceeds from token ids.
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs