Built with Axolotl

Axolotl config (axolotl version 0.12.2):

# === Model Configuration ===
base_model: Columbidae/apertus-12b-chatml-untrained
trust_remote_code: true
load_in_8bit: false
load_in_4bit: false

# === HF Configuration === 
hub_model_id: allura-forge/apertus-12b-cpt-attempt
hub_strategy: "every_save"
output_dir: ckpts

# === Wandb Tracking ===
wandb_project: ApertusV2
# wandb_entity: [WANDB_ENTITY]
wandb_name: 12b-cpt-part1

# === Training Setup ===
num_epochs: 1
micro_batch_size: 1
gradient_accumulation_steps: 16
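# effective batch: 1 micro-batch x 16 accumulation steps x 2 GPUs = 32 sequences
# per optimizer step (matches total_train_batch_size: 32 reported below)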
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
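# packing concatenates short examples into full 8192-token sequences to minimize padding waste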
#pad_to_sequence_len: true
#temperature: 0.7
#max_steps: 10
# === Evaluation ===
#val_set_size: 0.025
#evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
#eval_max_new_tokens: 128
#eval_sample_packing: true
eval_strategy: "no"

# === LoRA Configuration ===
adapter:
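# adapter is left empty, so this run trains all parameters in full;
# the lora_* settings below are inert unless adapter is set to lora or qlora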
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_target_modules:
#  - up_proj
#  - down_proj
#  - gate_proj
#  - q_proj
#  - v_proj
#  - k_proj
#  - o_proj
#  - input_layernorm
#  - post_attention_layernorm
#  - embed_tokens
#  - lm_head

lora_fan_in_fan_out:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true
#unfrozen_parameters:
#  - model.layers.[0-9+].mlp.up_proj
#  - model.layers.[0-9+].mlp.down_proj
#  - model.layers.[0-9+].feedforward_layernorm
#  - embed_tokens
#  - lm_head
# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
#warmup_steps: 0
warmup_ratio: 0.025
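# 2.5% of total steps; at this run's ~290 optimizer steps that resolves to the 7 warmup steps reported below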
#optimizer: adamw_torch_fused
optimizer: paged_adamw_8bit
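# bitsandbytes paged 8-bit AdamW: optimizer state is kept in 8-bit and paged to
# CPU memory under pressure, substantially reducing optimizer VRAM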
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: cosine
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0


# === Data Configuration ===
#
#chat_template: jinja
chat_template: chatml
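# the chatml template applies only to chat_template-type datasets; every dataset
# enabled below is completion-type, so this mainly pins the template saved with the tokenizer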
special_tokens:
#  eos_token: "<|im_end|>"
#  eos_token: "</s>"
#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: allura-org/the-anarchist-library
    type: completion
    split: train[:20%]
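    # completion-type datasets train on raw text (every token is a target); split
    # strings use the HF datasets slicing syntax: train[:20%] = first 20% of the train split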
#  - path: grimulkan/LimaRP-augmented
#    type: chat_template
#    field_messages: conversations
#    message_property_mappings:
#      role: from
#      content: value
#  - path: allenai/tulu-3-sft-personas-instruction-following
#    type: chat_template
#    split: train[:10%]
#  - path: ToastyPigeon/mixed-medical-reasoning-formatted
#    type: chat_template
#    data_files: mixed-medical-thinking.json
#    split: train[:10%]
  - path: ToastyPigeon/steve-and-marvin
    type: completion
    data_files: marvin.json
  - path: ToastyPigeon/kimi-stories-completion
    type: completion
  - path: ToastyPigeon/some-erotica
#    type: customcompletion-regex
    type: completion
    split: train[50%:65%]
#    data_files: new-story-dataset-v2.json
#  - path: allura-org/fujin-instruct-v2
#    type: customchatml-regex
#    type: chat_template
#    field_messages: conversations
#    message_property_mappings:
#      role: from
#      content: value
#  - path: ToastyPigeon/some-rp-extended
#    type: customchatml-regex
#    type: chat_template
#    field_messages: conversations
#    message_property_mappings:
#      role: from
#      content: value
#    roles_to_train: ["user","assistant"]
  - path: Alfitaria/rosier-inf
    type: completion
    split: train[:20%]
#  - path: allura-forge/koto-instruct-sft-nothink
#    type: customchatml-regex
#    type: chat_template
#    split: train[:50%]
#    field_messages: conversations
#    message_property_mappings:
#      role: from
#      content: value
  - path: ToastyPigeon/SpringDragon
#    type: customcompletion-regex
    type: completion
    split: train
  - path: ToastyPigeon/erotic-books-clone
#    type: customcompletion-regex
    type: completion
    split: train[:20%]
#    split: train[35%:45%]
#  - path: ToastyPigeon/tulu-mini
#    type: chat_template
dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
gradient_checkpointing: true
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true
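# Liger replaces RoPE, RMSNorm, LayerNorm, and GLU ops with fused Triton kernels;
# cut_cross_entropy computes the loss without materializing the full vocab-size
# logits tensor, which saves substantial VRAM at sequence_len 8192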

#deepspeed: ../axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
#fsdp:
#  - full_shard
#  - auto_wrap
#fsdp_config:
#  fsdp_limit_all_gathers: true
#  fsdp_sync_module_states: true
#  fsdp_offload_params: true
#  fsdp_activation_checkpointing: true
#  fsdp_use_orig_params: true
#  fsdp_cpu_ram_efficient_loading: true
#  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
#  fsdp_transformer_layer_cls_to_wrap: ApertusDecoderLayer
#  fsdp_state_dict_type: FULL_STATE_DICT
#  fsdp_sharding_strategy: FULL_SHARD
#fsdp_stage: 2
#fsdp_final_state_dict_type: FULL_STATE_DICT

# === Checkpointing ===
#save_steps: 2
saves_per_epoch: 10
save_total_limit: 1
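# ten intermediate saves per epoch, but save_total_limit keeps only the newest checkpoint on disk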

# === Advanced Settings ===
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
seed: 420
gc_steps: 10
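
A config like this is launched through axolotl's CLI (in recent versions, axolotl preprocess config.yml to tokenize the datasets into dataset_prepared_path, then axolotl train config.yml). The split strings in the datasets section are ordinary Hugging Face datasets slices; a minimal sketch of how two of them resolve, assuming the datasets library is installed:

from datasets import load_dataset

# slices match the split strings used in the config above
anarchist = load_dataset("allura-org/the-anarchist-library", split="train[:20%]")  # first 20%
erotica = load_dataset("ToastyPigeon/some-erotica", split="train[50%:65%]")        # middle 15% slice
print(len(anarchist), len(erotica))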


apertus-12b-cpt-attempt

This model is a fine-tuned version of Columbidae/apertus-12b-chatml-untrained on the allura-org/the-anarchist-library, the ToastyPigeon/steve-and-marvin, the ToastyPigeon/kimi-stories-completion, the ToastyPigeon/some-erotica, the Alfitaria/rosier-inf, the ToastyPigeon/SpringDragon and the ToastyPigeon/erotic-books-clone datasets.
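
A minimal inference sketch, assuming transformers (>= 4.57, matching the framework versions below) and accelerate are installed; the prompt is illustrative, and trust_remote_code mirrors the training config:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "allura-forge/apertus-12b-cpt-attempt"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # checkpoint is stored in BF16
    device_map="auto",           # requires accelerate
    trust_remote_code=True,
)

prompt = "The rain had been falling for three days when"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))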

Model description

More information needed

Intended uses & limitations

More information needed

Training and evaluation data

More information needed

Training procedure

Training hyperparameters

The following hyperparameters were used during training:

  • learning_rate: 1e-05
  • train_batch_size: 1
  • eval_batch_size: 1
  • seed: 420
  • distributed_type: multi-GPU
  • num_devices: 2
  • gradient_accumulation_steps: 16
  • total_train_batch_size: 32
  • total_eval_batch_size: 2
  • optimizer: paged_adamw_8bit with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
  • lr_scheduler_type: cosine
  • lr_scheduler_warmup_steps: 7
  • training_steps: 290
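
These values are consistent with the config above: total_train_batch_size = micro_batch_size (1) × gradient_accumulation_steps (16) × num_devices (2) = 32, and the 7 warmup steps follow from warmup_ratio 0.025 × 290 training steps ≈ 7.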

Training results

Framework versions

  • Transformers 4.57.1
  • Pytorch 2.8.0+cu129
  • Datasets 4.0.0
  • Tokenizers 0.22.1