hardlyworking committed
Commit 8a88702 · verified · 1 parent: 87c66b1

Upload mistral-small-3.1-24B-lora.yml

Files changed (1):
  mistral-small-3.1-24B-lora.yml  +61 -0
mistral-small-3.1-24B-lora.yml ADDED
@@ -0,0 +1,61 @@
+ base_model: mistralai/Mistral-Small-3.2-24B-Instruct-2506
+ processor_type: AutoProcessor
+
+ load_in_8bit: false
+ load_in_4bit: true
+
+ # these 3 lines are needed for now to handle vision chat templates w images
+ skip_prepare_dataset: true
+ remove_unused_columns: false
+ sample_packing: false
+
+ chat_template: mistral_v7_tekken
+ datasets:
+   - path: hardlyworking/HardlyRPv2-10k
+     type: chat_template
+     split: train
+     field_messages: messages
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.05
+ output_dir: ./outputs/out
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 8192
+ pad_to_sequence_len: false
+
+ lora_r: 64
+ lora_alpha: 32
+ lora_dropout: 0.05
+ lora_target_modules: 'model.language_model.layers.[\d]+.(mlp|cross_attn|self_attn).(up|down|gate|q|k|v|o)_proj'
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 1
+ micro_batch_size: 16
+ num_epochs: 1
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ bf16: true
+ fp16:
+ tf32: true
+
+ gradient_checkpointing: unsloth
+ logging_steps: 1
+ # flash_attention: false # PixtralVisionModel does not support Flash Attention 2.0 yet.
+ sdp_attention: true
+
+ warmup_ratio: 0.1
+ evals_per_epoch: 4
+ saves_per_epoch: 1
+ weight_decay: 0.0
+ special_tokens:
+
+ # save_first_step: true # uncomment this to validate checkpoint saving works with your config
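
For reference, a minimal sketch of how a config like this is typically launched with the axolotl CLI (not part of the commit; assumes axolotl and accelerate are installed locally, and exact entry points can vary between axolotl versions):

    # optionally pre-tokenize the dataset into last_run_prepared first
    python -m axolotl.cli.preprocess mistral-small-3.1-24B-lora.yml
    # launch the QLoRA fine-tune described by the config above
    accelerate launch -m axolotl.cli.train mistral-small-3.1-24B-lora.yml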