Zaynes committed on
Commit 6348f23 · verified · Parent: b289678

Upload folder using huggingface_hub

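The commit message indicates these files were pushed with huggingface_hub's folder-upload helper. A minimal sketch of that kind of call is below; the local folder path is a placeholder and this is not necessarily the exact invocation used for this commit.

```python
# Minimal sketch (placeholder paths): pushing a local experiment folder to the
# Hub with huggingface_hub's upload_folder helper, which produces commits like
# the one shown here.
from huggingface_hub import upload_folder

upload_folder(
    folder_path="experiments/lf_torch_test__local/merged",  # hypothetical local path
    repo_id="TAUR-dev/testing_llamafactory_helper_quick_test__local",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```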
.gitattributes CHANGED
@@ -1,36 +1,8 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Mark all log files as text to prevent binary file issues
+ *.log text
+ *.txt text
+ *.out text
+ *.err text
+ training_artifacts/logs/* text
+ model.safetensors filter=lfs diff=lfs merge=lfs -text
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
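The new rules keep the plain-text training artifacts as regular text in git while still routing the weights through LFS. As a rough illustration of which rule catches which file in this commit (fnmatch only approximates gitattributes pattern matching, so treat this as a sketch):

```python
# Sketch: approximate which of the new .gitattributes rules apply to files in
# this commit. fnmatch is not git's exact matcher, but it is close enough to
# show the intent of the rules above.
from fnmatch import fnmatch

rules = [
    ("*.log", "text"),
    ("*.txt", "text"),
    ("*.out", "text"),
    ("*.err", "text"),
    ("training_artifacts/logs/*", "text"),
    ("model.safetensors", "filter=lfs diff=lfs merge=lfs -text"),
    ("tokenizer.json", "filter=lfs diff=lfs merge=lfs -text"),
]

for path in ("training_artifacts/logs/pipeline_cleaned.txt", "model.safetensors"):
    hits = [attr for pattern, attr in rules
            if fnmatch(path, pattern) or fnmatch(path.rsplit("/", 1)[-1], pattern)]
    print(path, "->", hits)
```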
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2cf35f52717fd16b5fb8af87b3d58b2e520f56753db16ba6ef53dd60ad513e22
+ oid sha256:f35a9ff353519e1810e68e0822951de54782b906bbd0b82220765f32c65f6db7
  size 988097824
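What git tracks here is a Git LFS pointer, not the weights themselves: `oid sha256:` is the SHA-256 of the real file and `size` its byte count, so this commit swaps in new weights of identical size. A small sketch for checking a locally downloaded copy against the pointer (the local path is a placeholder):

```python
# Sketch: verify a locally downloaded model.safetensors against the LFS pointer
# above. In an LFS pointer file, `oid sha256:` is the SHA-256 of the full file
# contents and `size` is its length in bytes.
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # hypothetical local copy of this revision
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

print("oid sha256:", digest.hexdigest())  # should match f35a9ff3... for this revision
print("size:", path.stat().st_size)       # should print 988097824
```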
training_artifacts/README.md ADDED
@@ -0,0 +1,16 @@
+ # Training Artifacts
+
+ This directory contains the training configuration and logs for this model.
+
+ ## Contents
+
+ - **hydra_config.yaml**: Complete Hydra configuration used for training
+ - **train_config.yaml**: LlamaFactory training configuration
+ - **merge_config.yaml**: LlamaFactory merge/export configuration
+ - **logs/**: Training logs from the job (cleaned for text format)
+
+ ## Job Information
+
+ - Job Name: lf_torch_test__local
+ - Timestamp: 2025-10-22 20:16:21 UTC
+ - Execution Mode: Local
training_artifacts/hydra_config.yaml ADDED
@@ -0,0 +1,216 @@
+ ? ''
+ : ? ''
+ : ? ''
+ : hydra:
+ run:
+ dir: .
+ output_subdir: null
+ job:
+ chdir: false
+ _target_: null
+ job:
+ name: ???
+ mode: slurm
+ work_dir: null
+ dry_run: false
+ slurm:
+ time_limit: ???
+ constraint:
+ - h200
+ memory: 200
+ cpus_per_task: 16
+ partition: null
+ mail_user: user@example.com
+ execution:
+ nodes: null
+ gpus_per_node: null
+ num_gpus: null
+ hostfile: null
+ secrets_file: null
+ model:
+ name_or_path: ???
+ finetuning_type: lora
+ dataset:
+ name: ???
+ dir: null
+ info_json: null
+ template: default
+ cutoff_len: 1024
+ val_size: 0.1
+ hf_hub_url: null
+ formatting: alpaca
+ ranking: false
+ subset: null
+ split: train
+ folder: null
+ num_samples: null
+ columns:
+ prompt: null
+ query: null
+ response: null
+ history: null
+ messages: null
+ system: null
+ tools: null
+ images: null
+ videos: null
+ audios: null
+ chosen: null
+ rejected: null
+ kto_tag: null
+ tags:
+ role: null
+ content: null
+ user: null
+ assistant: null
+ observation: null
+ function: null
+ system: null
+ training:
+ stage: sft
+ do_train: true
+ model_name_or_path: null
+ finetuning_type: lora
+ trust_remote_code: true
+ dataset: null
+ dataset_dir: null
+ template: default
+ cutoff_len: 1024
+ val_size: 0.1
+ preprocessing_num_workers: 1
+ dataset_num_proc: 1
+ dataloader_num_workers: 0
+ streaming: false
+ learning_rate: 5.0e-05
+ num_train_epochs: 3.0
+ per_device_train_batch_size: 1
+ per_device_eval_batch_size: 1
+ gradient_accumulation_steps: 8
+ lr_scheduler_type: cosine
+ warmup_ratio: 0.1
+ warmup_steps: 0
+ lora_rank: 8
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target: all
+ optim: adamw_torch
+ bf16: true
+ fp16: false
+ output_dir: null
+ save_strategy: epoch
+ save_steps: 500
+ save_total_limit: 3
+ save_only_model: false
+ eval_strategy: steps
+ eval_steps: 500
+ do_eval: true
+ logging_steps: 10
+ plot_loss: true
+ report_to: none
+ gradient_checkpointing: true
+ ddp_timeout: 180000000
+ include_num_input_tokens_seen: true
+ overwrite_output_dir: true
+ overwrite_cache: false
+ seed: 42
+ lora:
+ rank: 8
+ alpha: 16
+ dropout: 0.05
+ target: all
+ output:
+ experiment_dir: ./experiments
+ merge:
+ stage: export
+ model_name_or_path: null
+ adapter_name_or_path: null
+ template: default
+ export_dir: null
+ export_size: 2
+ export_device: auto
+ export_legacy_format: false
+ finetuning_type: lora
+ wandb:
+ project: null
+ run_name: null
+ entity: null
+ hf:
+ repo_id: null
+ private: false
+ upload_artifacts: true
+ cleanup:
+ checkpoints: false
+ merged: false
+ job:
+ name: lf_torch_test__local
+ mode: local
+ work_dir: null
+ dry_run: false
+ slurm:
+ time_limit: null
+ constraint: null
+ memory: null
+ partition: null
+ mail_user: null
+ execution:
+ nodes: 1
+ gpus_per_node: 2
+ num_gpus: null
+ hostfile: null
+ secrets_file: ./secrets.env
+ model:
+ name_or_path: Qwen/Qwen2.5-0.5B
+ finetuning_type: lora
+ lora:
+ rank: 8
+ alpha: 16
+ dropout: 0.05
+ target: all
+ dataset:
+ name: my_custom_sft
+ dir: null
+ info_json: null
+ template: default
+ cutoff_len: 8096
+ val_size: 0.1
+ hf_hub_url: TAUR-dev/D-SFT_C-sft_exp_AT_pvv2__fixed-sft-data
+ formatting: sharegpt
+ ranking: false
+ subset: null
+ split: train
+ folder: null
+ num_samples: null
+ columns:
+ messages: conversations
+ tags:
+ role: role
+ content: content
+ user: user
+ assistant: assistant
+ output:
+ experiment_dir: ./experiments
+ wandb:
+ project: null
+ run_name: local_test
+ entity: null
+ hf:
+ repo_id: TAUR-dev/testing_llamafactory_helper_quick_test__local
+ private: false
+ cleanup:
+ checkpoints: false
+ merged: false
+ training:
+ stage: sft
+ do_train: true
+ max_steps: 100
+ do_eval: false
+ save_strategy: steps
+ save_steps: 50
+ logging_steps: 10
+ fp16: true
+ bf16: false
+ overwrite_output_dir: true
+ per_device_train_batch_size: 1
+ gradient_accumulation_steps: 1
+ gradient_checkpointing: true
+ merge: {}
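For anyone consuming these artifacts, the config can be pulled and parsed straight from the Hub. A minimal sketch, assuming the file loads as ordinary YAML (note that the dump above wraps the real config under empty-string keys, so some unwrapping may be needed before the job/model/dataset sections are reachable):

```python
# Sketch: download and inspect the uploaded Hydra config from this repo.
import yaml
from huggingface_hub import hf_hub_download

cfg_path = hf_hub_download(
    repo_id="TAUR-dev/testing_llamafactory_helper_quick_test__local",
    filename="training_artifacts/hydra_config.yaml",
)
with open(cfg_path) as f:
    cfg = yaml.safe_load(f)

print(type(cfg))
if isinstance(cfg, dict):
    print(list(cfg.keys())[:5])  # expect the empty-string wrapper key(s) at the top level
```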
training_artifacts/logs/pipeline_cleaned.txt ADDED
@@ -0,0 +1,974 @@
1
+ ========================================
2
+ Job Name: lf_torch_test__local
3
+ Hostname: gl064.hpc.nyu.edu
4
+ Number of nodes: 1
5
+ GPUs per node: 2
6
+ Start Time: Wed Oct 22 04:15:08 PM EDT 2025
7
+ Log file: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/logs/pipeline.log
8
+ ========================================
9
+ Sourcing secrets from: /scratch/zrs2020/LlamaFactoryHelper/secrets.env
10
+
11
+ ========================================
12
+ Configuration Paths
13
+ ========================================
14
+ Train Config: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/configs/train_config.yaml
15
+ Merge Config: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/configs/merge_config.yaml
16
+ Dataset Info:
17
+ Output Dir: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints
18
+ Export Dir: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged
19
+ HF Repo ID: TAUR-dev/testing_llamafactory_helper_quick_test__local
20
+
21
+
22
+ ========================================
23
+ STAGE 1: Training Model
24
+ Start Time: Wed Oct 22 04:15:10 PM EDT 2025
25
+ ========================================
26
+ Single-node multi-GPU training detected
27
+ GPUs: 2
28
+ CUDA_VISIBLE_DEVICES: 0,1
29
+ Executing command: llamafactory-cli train /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/configs/train_config.yaml
30
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.
31
+ warnings.warn(
32
+ [INFO|2025-10-22 16:15:19] llamafactory.launcher:143 >> Initializing 2 distributed tasks at: 127.0.0.1:45183
33
+ W1022 16:15:20.576000 2375530 site-packages/torch/distributed/run.py:803]
34
+ W1022 16:15:20.576000 2375530 site-packages/torch/distributed/run.py:803] *****************************************
35
+ W1022 16:15:20.576000 2375530 site-packages/torch/distributed/run.py:803] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
36
+ W1022 16:15:20.576000 2375530 site-packages/torch/distributed/run.py:803] *****************************************
37
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.
38
+ warnings.warn(
39
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.
40
+ warnings.warn(
41
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/jieba/_compat.py:18: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.
42
+ import pkg_resources
43
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/jieba/_compat.py:18: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.
44
+ import pkg_resources
45
+ [W1022 16:15:28.663709296 ProcessGroupNCCL.cpp:924] Warning: TORCH_NCCL_AVOID_RECORD_STREAMS is the default now, this environment variable is thus deprecated. (function operator())
46
+ [W1022 16:15:28.663713666 ProcessGroupNCCL.cpp:924] Warning: TORCH_NCCL_AVOID_RECORD_STREAMS is the default now, this environment variable is thus deprecated. (function operator())
47
+ [INFO|2025-10-22 16:15:29] llamafactory.hparams.parser:143 >> Set `ddp_find_unused_parameters` to False in DDP training since LoRA is enabled.
48
+ [INFO|2025-10-22 16:15:29] llamafactory.hparams.parser:423 >> Process rank: 0, world size: 2, device: cuda:0, distributed training: True, compute dtype: torch.float16
49
+ [INFO|2025-10-22 16:15:29] llamafactory.hparams.parser:423 >> Process rank: 1, world size: 2, device: cuda:1, distributed training: True, compute dtype: torch.float16
50
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file vocab.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/vocab.json
51
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file merges.txt from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/merges.txt
52
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file tokenizer.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer.json
53
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file added_tokens.json from cache at None
54
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file special_tokens_map.json from cache at None
55
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file tokenizer_config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer_config.json
56
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,285 >> loading file chat_template.jinja from cache at None
57
+ [INFO|tokenization_utils_base.py:2364] 2025-10-22 16:15:29,463 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
58
+ [INFO|configuration_utils.py:765] 2025-10-22 16:15:29,648 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
59
+ [INFO|configuration_utils.py:839] 2025-10-22 16:15:29,650 >> Model config Qwen2Config {
60
+ "architectures": [
61
+ "Qwen2ForCausalLM"
62
+ ],
63
+ "attention_dropout": 0.0,
64
+ "bos_token_id": 151643,
65
+ "dtype": "bfloat16",
66
+ "eos_token_id": 151643,
67
+ "hidden_act": "silu",
68
+ "hidden_size": 896,
69
+ "initializer_range": 0.02,
70
+ "intermediate_size": 4864,
71
+ "layer_types": [
72
+ "full_attention",
73
+ "full_attention",
74
+ "full_attention",
75
+ "full_attention",
76
+ "full_attention",
77
+ "full_attention",
78
+ "full_attention",
79
+ "full_attention",
80
+ "full_attention",
81
+ "full_attention",
82
+ "full_attention",
83
+ "full_attention",
84
+ "full_attention",
85
+ "full_attention",
86
+ "full_attention",
87
+ "full_attention",
88
+ "full_attention",
89
+ "full_attention",
90
+ "full_attention",
91
+ "full_attention",
92
+ "full_attention",
93
+ "full_attention",
94
+ "full_attention",
95
+ "full_attention"
96
+ ],
97
+ "max_position_embeddings": 32768,
98
+ "max_window_layers": 24,
99
+ "model_type": "qwen2",
100
+ "num_attention_heads": 14,
101
+ "num_hidden_layers": 24,
102
+ "num_key_value_heads": 2,
103
+ "rms_norm_eps": 1e-06,
104
+ "rope_scaling": null,
105
+ "rope_theta": 1000000.0,
106
+ "sliding_window": null,
107
+ "tie_word_embeddings": true,
108
+ "transformers_version": "4.57.1",
109
+ "use_cache": true,
110
+ "use_mrope": false,
111
+ "use_sliding_window": false,
112
+ "vocab_size": 151936
113
+ }
114
+
115
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file vocab.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/vocab.json
116
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file merges.txt from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/merges.txt
117
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file tokenizer.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer.json
118
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file added_tokens.json from cache at None
119
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file special_tokens_map.json from cache at None
120
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file tokenizer_config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer_config.json
121
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:15:29,728 >> loading file chat_template.jinja from cache at None
122
+ [INFO|tokenization_utils_base.py:2364] 2025-10-22 16:15:29,901 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
123
+ [INFO|2025-10-22 16:15:29] llamafactory.data.loader:143 >> Loading dataset TAUR-dev/D-SFT_C-sft_exp_AT_pvv2__fixed-sft-data...
124
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/torch/distributed/distributed_c10d.py:4876: UserWarning: barrier(): using the device under current context. You can specify `device_id` in `init_process_group` to mute this warning.
125
+ warnings.warn( # warn only once
126
+ [rank0]:[W1022 16:15:30.381306010 ProcessGroupNCCL.cpp:5068] Guessing device ID based on global rank. This can cause a hang if rank to GPU mapping is heterogeneous. You can specify device_id in init_process_group()
127
+ gl064:2375557:2375557 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to ibs
128
+ gl064:2375557:2375557 [0] NCCL INFO Bootstrap: Using ibs3:10.0.5.0<0>
129
+ gl064:2375557:2375557 [0] NCCL INFO cudaDriverVersion 13000
130
+ gl064:2375557:2375557 [0] NCCL INFO NCCL version 2.27.5+cuda12.9
131
+ gl064:2375557:2375557 [0] NCCL INFO Comm config Blocking set to 1
132
+ gl064:2375558:2375558 [1] NCCL INFO cudaDriverVersion 13000
133
+ gl064:2375558:2375558 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to ibs
134
+ gl064:2375558:2375558 [1] NCCL INFO Bootstrap: Using ibs3:10.0.5.0<0>
135
+ gl064:2375558:2375558 [1] NCCL INFO NCCL version 2.27.5+cuda12.9
136
+ gl064:2375558:2375558 [1] NCCL INFO Comm config Blocking set to 1
137
+ gl064:2375557:2375626 [0] NCCL INFO NET/Plugin: Could not find: libnccl-net.so.
138
+ gl064:2375557:2375626 [0] NCCL INFO NCCL_IB_DISABLE set by environment to 0.
139
+ gl064:2375557:2375626 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to ibs
140
+ gl064:2375557:2375626 [0] NCCL INFO NCCL_IB_HCA set to mlx5
141
+ gl064:2375558:2375627 [1] NCCL INFO NET/Plugin: Could not find: libnccl-net.so.
142
+ gl064:2375558:2375627 [1] NCCL INFO NCCL_IB_DISABLE set by environment to 0.
143
+ gl064:2375558:2375627 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to ibs
144
+ gl064:2375558:2375627 [1] NCCL INFO NCCL_IB_HCA set to mlx5
145
+ gl064:2375557:2375626 [0] NCCL INFO NET/IB : Using [0]mlx5_0:1/IB [RO]; OOB ibs3:10.0.5.0<0>
146
+ gl064:2375557:2375626 [0] NCCL INFO Initialized NET plugin IB
147
+ gl064:2375557:2375626 [0] NCCL INFO Assigned NET plugin IB to comm
148
+ gl064:2375557:2375626 [0] NCCL INFO Using network IB
149
+ gl064:2375558:2375627 [1] NCCL INFO NET/IB : Using [0]mlx5_0:1/IB [RO]; OOB ibs3:10.0.5.0<0>
150
+ gl064:2375558:2375627 [1] NCCL INFO Initialized NET plugin IB
151
+ gl064:2375557:2375626 [0] NCCL INFO ncclCommInitRankConfig comm 0x13ebaba0 rank 0 nranks 2 cudaDev 0 nvmlDev 0 busId 47000 commId 0x51d04234087668e3 - Init START
152
+ gl064:2375558:2375627 [1] NCCL INFO Assigned NET plugin IB to comm
153
+ gl064:2375558:2375627 [1] NCCL INFO Using network IB
154
+ gl064:2375558:2375627 [1] NCCL INFO ncclCommInitRankConfig comm 0x13fbbf20 rank 1 nranks 2 cudaDev 1 nvmlDev 1 busId 59000 commId 0x51d04234087668e3 - Init START
155
+ gl064:2375557:2375626 [0] NCCL INFO RAS client listening socket at ::1<28028>
156
+ gl064:2375558:2375627 [1] NCCL INFO RAS client listening socket at ::1<28028>
157
+ gl064:2375558:2375627 [1] NCCL INFO Bootstrap timings total 0.000915 (create 0.000023, send 0.000075, recv 0.000197, ring 0.000016, delay 0.000000)
158
+ gl064:2375557:2375626 [0] NCCL INFO Bootstrap timings total 0.009451 (create 0.000024, send 0.000185, recv 0.000337, ring 0.000023, delay 0.000000)
159
+ gl064:2375557:2375626 [0] NCCL INFO Setting affinity for GPU 0 to 0-15
160
+ gl064:2375558:2375627 [1] NCCL INFO Setting affinity for GPU 1 to 0-15
161
+ gl064:2375557:2375626 [0] NCCL INFO comm 0x13ebaba0 rank 0 nRanks 2 nNodes 1 localRanks 2 localRank 0 MNNVL 0
162
+ gl064:2375558:2375627 [1] NCCL INFO comm 0x13fbbf20 rank 1 nRanks 2 nNodes 1 localRanks 2 localRank 1 MNNVL 0
163
+ gl064:2375557:2375626 [0] NCCL INFO Channel 00/02 : 0 1
164
+ gl064:2375558:2375627 [1] NCCL INFO Trees [0] -1/-1/-1->1->0 [1] -1/-1/-1->1->0
165
+ gl064:2375557:2375626 [0] NCCL INFO Channel 01/02 : 0 1
166
+ gl064:2375558:2375627 [1] NCCL INFO P2P Chunksize set to 131072
167
+ gl064:2375557:2375626 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1
168
+ gl064:2375557:2375626 [0] NCCL INFO P2P Chunksize set to 131072
169
+ gl064:2375558:2375627 [1] NCCL INFO PROFILER/Plugin: Could not find: libnccl-profiler.so.
170
+ gl064:2375557:2375626 [0] NCCL INFO PROFILER/Plugin: Could not find: libnccl-profiler.so.
171
+ gl064:2375557:2375626 [0] NCCL INFO Check P2P Type isAllDirectP2p 0 directMode 0
172
+ gl064:2375558:2375633 [1] NCCL INFO [Proxy Service] Device 1 CPU core 11
173
+ gl064:2375558:2375635 [1] NCCL INFO [Proxy Service UDS] Device 1 CPU core 12
174
+ gl064:2375557:2375634 [0] NCCL INFO [Proxy Service] Device 0 CPU core 11
175
+ gl064:2375557:2375636 [0] NCCL INFO [Proxy Service UDS] Device 0 CPU core 13
176
+ gl064:2375558:2375627 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 512 | 512
177
+ gl064:2375558:2375627 [1] NCCL INFO 2 coll channels, 2 collnet channels, 0 nvls channels, 2 p2p channels, 2 p2p channels per peer
178
+ gl064:2375557:2375626 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 512 | 512
179
+ gl064:2375557:2375626 [0] NCCL INFO 2 coll channels, 2 collnet channels, 0 nvls channels, 2 p2p channels, 2 p2p channels per peer
180
+ gl064:2375557:2375626 [0] NCCL INFO CC Off, workFifoBytes 1048576
181
+ gl064:2375558:2375627 [1] NCCL INFO TUNER/Plugin: Could not find: libnccl-tuner.so. Using internal tuner plugin.
182
+ gl064:2375558:2375627 [1] NCCL INFO ncclCommInitRankConfig comm 0x13fbbf20 rank 1 nranks 2 cudaDev 1 nvmlDev 1 busId 59000 commId 0x51d04234087668e3 - Init COMPLETE
183
+ gl064:2375557:2375626 [0] NCCL INFO TUNER/Plugin: Could not find: libnccl-tuner.so. Using internal tuner plugin.
184
+ gl064:2375557:2375626 [0] NCCL INFO ncclCommInitRankConfig comm 0x13ebaba0 rank 0 nranks 2 cudaDev 0 nvmlDev 0 busId 47000 commId 0x51d04234087668e3 - Init COMPLETE
185
+ gl064:2375558:2375627 [1] NCCL INFO Init timings - ncclCommInitRankConfig: rank 1 nranks 2 total 0.12 (kernels 0.09, alloc 0.01, bootstrap 0.00, allgathers 0.01, topo 0.02, graphs 0.00, connections 0.00, rest 0.00)
186
+ gl064:2375557:2375626 [0] NCCL INFO Init timings - ncclCommInitRankConfig: rank 0 nranks 2 total 0.12 (kernels 0.09, alloc 0.01, bootstrap 0.01, allgathers 0.00, topo 0.02, graphs 0.00, connections 0.00, rest 0.00)
187
+ gl064:2375557:2375637 [0] NCCL INFO Channel 00 : 0[0] -> 1[1] via SHM/direct/direct
188
+ gl064:2375558:2375638 [1] NCCL INFO Channel 00 : 1[1] -> 0[0] via SHM/direct/direct
189
+ gl064:2375557:2375637 [0] NCCL INFO Channel 01 : 0[0] -> 1[1] via SHM/direct/direct
190
+ gl064:2375558:2375638 [1] NCCL INFO Channel 01 : 1[1] -> 0[0] via SHM/direct/direct
191
+ gl064:2375557:2375637 [0] NCCL INFO Connected all rings, use ring PXN 0 GDR 1
192
+ gl064:2375558:2375638 [1] NCCL INFO Connected all rings, use ring PXN 0 GDR 1
193
+ training example:
194
+ input_ids:
195
+ [33975, 25, 21806, 279, 2701, 3491, 13, 81917, 697, 32711, 3019, 553, 3019, 13, 3197, 498, 525, 8060, 11, 2968, 697, 4226, 304, 419, 3561, 25, 366, 9217, 2235, 21732, 4226, 12533, 9217, 94367, 2, 22079, 198, 16429, 279, 5109, 304, 279, 1140, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 1125, 1855, 458, 23606, 429, 16819, 220, 16, 21, 24, 13, 1446, 646, 990, 6770, 34784, 7525, 17973, 11, 85922, 11777, 608, 8, 323, 1817, 1372, 646, 1172, 387, 1483, 3055, 13, 4615, 6291, 1265, 2924, 264, 4013, 315, 7354, 330, 8304, 1599, 2974, 1380, 1817, 3019, 374, 264, 35972, 5666, 323, 279, 1590, 3019, 13653, 11508, 311, 279, 2169, 1372, 476, 432, 1265, 387, 264, 3175, 23606, 429, 3059, 304, 279, 2169, 382, 35127, 697, 4226, 304, 279, 2701, 3561, 510, 27, 9217, 397, 7021, 413, 4226, 340, 522, 9217, 1339, 9064, 11993, 21732, 4226, 9940, 374, 279, 1140, 315, 7354, 311, 5545, 279, 2169, 1372, 476, 432, 1265, 387, 264, 3175, 23606, 429, 3059, 304, 279, 2169, 13, 4710, 2461, 3110, 510, 2679, 279, 1140, 315, 5109, 572, 508, 16, 11, 220, 17, 11, 220, 18, 60, 323, 279, 2169, 572, 220, 16, 11, 498, 1410, 3270, 510, 27, 9217, 397, 8304, 220, 16, 25, 220, 16, 488, 220, 17, 284, 220, 18, 198, 8304, 220, 17, 25, 220, 18, 608, 220, 18, 284, 220, 16, 198, 522, 9217, 1339, 269, 4710, 27, 9217, 397, 7, 16, 488, 220, 17, 8, 608, 220, 18, 198, 522, 9217, 1339, 10061, 594, 1744, 3019, 553, 3019, 13, 151643, 198, 71703, 25, 13708, 766, 397, 27, 13611, 397, 5338, 11, 358, 3278, 1430, 311, 3535, 279, 3491, 2664, 553, 4378, 700, 264, 3119, 323, 728, 2167, 5538, 1119, 7716, 911, 1246, 358, 1265, 11625, 419, 624, 8304, 220, 16, 25, 64547, 279, 5795, 481, 1205, 1184, 311, 1855, 458, 23606, 1667, 279, 5109, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 60, 429, 16819, 220, 16, 21, 24, 624, 8304, 220, 17, 25, 21144, 3204, 7525, 481, 78646, 11, 75240, 11, 46444, 11, 323, 12804, 624, 8304, 220, 18, 25, 9735, 2155, 27459, 315, 1493, 7525, 448, 279, 2661, 5109, 624, 8304, 220, 19, 25, 3393, 5257, 37906, 3080, 582, 1477, 825, 429, 16819, 220, 16, 21, 24, 624, 8304, 220, 20, 25, 9646, 582, 614, 1730, 279, 4396, 23606, 11, 3042, 432, 304, 279, 5189, 3561, 624, 6025, 2087, 1526, 678, 1493, 7354, 11, 1588, 594, 279, 1590, 23606, 429, 16819, 220, 16, 21, 24, 1447, 27, 9217, 397, 7, 21, 22, 353, 220, 17, 8, 488, 220, 22, 16, 481, 220, 18, 16, 198, 522, 9217, 397, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 1447, 5338, 11, 220, 21, 22, 488, 220, 22, 16, 284, 220, 16, 18, 23, 11, 1221, 220, 16, 18, 23, 481, 220, 18, 16, 284, 220, 16, 15, 22, 13, 1096, 1558, 537, 6144, 220, 16, 21, 24, 11, 773, 279, 4226, 374, 15114, 382, 27, 423, 8477, 397, 40468, 198, 522, 423, 8477, 397, 522, 34913, 397, 10061, 752, 10146, 847, 4226, 624, 27, 13611, 397, 58465, 12153, 15846, 1447, 10253, 279, 5109, 504, 279, 2661, 738, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 60, 311, 1352, 458, 23606, 6144, 287, 220, 16, 21, 24, 1667, 6770, 34784, 7525, 17973, 11, 85922, 11777, 608, 3593, 8304, 220, 16, 25, 71059, 220, 21, 22, 323, 220, 18, 16, 624, 8304, 220, 17, 25, 2691, 220, 22, 16, 311, 279, 1102, 504, 14822, 220, 16, 624, 8304, 220, 18, 25, 576, 1590, 1102, 686, 387, 220, 16, 21, 24, 382, 27, 9217, 397, 7, 21, 22, 353, 220, 18, 16, 8, 488, 220, 22, 16, 284, 220, 16, 21, 24, 198, 522, 9217, 397, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 1447, 5338, 11, 220, 21, 22, 353, 220, 18, 16, 284, 220, 17, 11, 15, 22, 22, 11, 1221, 220, 17, 11, 15, 22, 22, 481, 220, 22, 16, 284, 220, 17, 11, 15, 15, 21, 13, 1096, 374, 537, 6144, 
311, 220, 16, 21, 24, 11, 773, 279, 4226, 374, 15114, 382, 27, 423, 8477, 397, 40468, 198, 522, 423, 8477, 397, 522, 34913, 397, 1462, 311, 3395, 847, 2033, 825, 803, 882, 624, 27, 13611, 397, 5338, 11, 358, 3278, 1430, 311, 3535, 279, 3491, 2664, 553, 4378, 700, 264, 3119, 323, 728, 2167, 5538, 1119, 7716, 911, 1246, 358, 1265, 11625, 419, 624, 785, 5795, 374, 311, 1855, 458, 23606, 1667, 279, 5109, 220, 21, 22, 11, 220, 22, 16, 11, 323, 220, 18, 16, 429, 16819, 220, 16, 21, 24, 624, 40, 686, 1191, 553, 4460, 2155, 27459, 315, 1493, 2326, 5109, 323, 862, 3204, 7525, 17973, 11, 85922, 11777, 608, 568, 715, 12209, 358, 686, 15442, 1817, 10601, 323, 1779, 421, 807, 6144, 220, 16, 21, 24, 13, 1416, 537, 11, 358, 686, 3271, 389, 311, 279, 1790, 10601, 3080, 358, 1477, 825, 429, 4278, 624, 6025, 9271, 279, 4396, 10601, 11, 358, 686, 3042, 847, 6291, 304, 279, 4055, 9217, 397, 7021, 413, 4226, 340, 522, 9217, 9877, 3561, 624, 8420, 594, 847, 11682, 3119, 1447, 16, 13, 4230, 678, 3204, 27459, 315, 220, 21, 22, 11, 220, 22, 16, 11, 323, 220, 18, 16, 1667, 5256, 11, 75240, 11, 46444, 11, 323, 12804, 624, 17, 13, 54115, 1817, 10601, 311, 1490, 421, 432, 16819, 220, 16, 21, 24, 624, 18, 13, 1416, 902, 10601, 16819, 220, 16, 21, 24, 11, 13153, 279, 1882, 448, 501, 27459, 3080, 358, 1477, 825, 429, 4278, 624, 19, 13, 9646, 358, 614, 1730, 279, 4396, 10601, 11, 3158, 432, 438, 264, 3175, 23606, 304, 279, 4055, 9217, 397, 7021, 413, 4226, 340, 522, 9217, 9877, 3561, 382, 7039, 1077, 752, 3161, 15039, 369, 279, 4396, 10601, 4894, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 1447, 16, 13, 14822, 220, 16, 25, 220, 21, 22, 353, 220, 18, 16, 284, 220, 17, 15, 22, 22, 198, 17, 13, 14822, 220, 17, 25, 220, 17, 15, 22, 22, 481, 220, 22, 16, 284, 220, 17, 15, 15, 21, 271, 54815, 11, 279, 1590, 1102, 374, 220, 17, 15, 15, 21, 11, 892, 1558, 537, 6144, 220, 16, 21, 24, 13, 576, 4226, 374, 15114, 382, 27, 423, 8477, 397, 40468, 198, 522, 423, 8477, 397, 522, 34913, 397, 3983, 3783, 11, 1077, 752, 1744, 911, 432, 1549, 624, 27, 13611, 397, 58465, 12153, 15846, 1447, 3838, 374, 279, 1102, 979, 498, 912, 1378, 5109, 504, 279, 1140, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 60, 3786, 1939, 8304, 220, 16, 25, 2691, 220, 21, 22, 323, 220, 18, 16, 624, 8304, 220, 17, 25, 11778, 279, 2629, 504, 14822, 220, 16, 323, 912, 220, 22, 16, 382, 19357, 84670, 25, 320, 21, 22, 488, 220, 18, 16, 8, 488, 220, 22, 16, 271, 27, 9217, 397, 7, 21, 22, 488, 220, 18, 16, 8, 488, 220, 22, 16, 198, 522, 9217, 397, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 25, 5512, 11, 220, 21, 22, 488, 220, 18, 16, 284, 220, 24, 23, 11, 1221, 220, 24, 23, 488, 220, 22, 16, 284, 220, 16, 21, 24, 13, 576, 4226, 374, 4396, 382, 27, 423, 8477, 397, 33092, 198, 522, 423, 8477, 397, 522, 34913, 397, 522, 26865, 10370, 39, 763, 11, 279, 4396, 4226, 374, 1447, 27, 9217, 397, 7, 21, 22, 488, 220, 18, 16, 8, 488, 220, 22, 16, 198, 522, 9217, 29, 151643, 198]
196
+ inputs:
197
+ Human: Answer the following problem. Explain your reasoning step by step. When you are finished, give your answer in this format: <answer>(your answer)</answer>.
198
+
199
+ # Problem
200
+ Using the numbers in the list [67, 71, 31], create an equation that equals 169. You can use basic arithmetic operations (+, -, *, /) and each number can only be used once. Your solution should include a series of steps "Step X:" where each step is a mathematical operation and the final step ultimately leads to the target number or it should be a single equation that results in the target.
201
+
202
+ Give your answer in the following format:
203
+ <answer>
204
+ (your answer)
205
+ </answer>
206
+
207
+ Where "(your answer)" is the list of steps to reach the target number or it should be a single equation that results in the target.
208
+
209
+ For example:
210
+ If the list of numbers was [1, 2, 3] and the target was 1, you could write:
211
+ <answer>
212
+ Step 1: 1 + 2 = 3
213
+ Step 2: 3 / 3 = 1
214
+ </answer>
215
+
216
+ or
217
+
218
+ <answer>
219
+ (1 + 2) / 3
220
+ </answer>
221
+
222
+ Let's think step by step.<|endoftext|>
223
+ Assistant:<think>
224
+ <sample>
225
+ First, I'll try to understand the problem better by writing out a plan and go really deep into detail about how I should solve this.
226
+ Step 1: Identify the goal - We need to create an equation using the numbers [67, 71, 31] that equals 169.
227
+ Step 2: Consider possible operations - Addition, subtraction, multiplication, and division.
228
+ Step 3: Try different combinations of these operations with the given numbers.
229
+ Step 4: Test various equations until we find one that equals 169.
230
+ Step 5: Once we have found the correct equation, present it in the specified format.
231
+ After going through all these steps, here's the final equation that equals 169:
232
+
233
+ <answer>
234
+ (67 * 2) + 71 - 31
235
+ </answer>
236
+ </sample>
237
+ <reflect>
238
+ Let us verify this answer:
239
+
240
+ First, 67 + 71 = 138, then 138 - 31 = 107. This does not equal 169, so the answer is incorrect.
241
+
242
+ <verdict>
243
+ Incorrect
244
+ </verdict>
245
+ </reflect>
246
+ Let me verify my answer.
247
+ <sample>
248
+ Rewritten Question:
249
+
250
+ Use the numbers from the given set [67, 71, 31] to form an equation equaling 169 using basic arithmetic operations (+, -, *, /).
251
+
252
+ Step 1: Multiply 67 and 31.
253
+ Step 2: Add 71 to the result from Step 1.
254
+ Step 3: The final result will be 169.
255
+
256
+ <answer>
257
+ (67 * 31) + 71 = 169
258
+ </answer>
259
+ </sample>
260
+ <reflect>
261
+ Let us verify this answer:
262
+
263
+ First, 67 * 31 = 2,077, then 2,077 - 71 = 2,006. This is not equal to 169, so the answer is incorrect.
264
+
265
+ <verdict>
266
+ Incorrect
267
+ </verdict>
268
+ </reflect>
269
+ Time to review my response one more time.
270
+ <sample>
271
+ First, I'll try to understand the problem better by writing out a plan and go really deep into detail about how I should solve this.
272
+ The goal is to create an equation using the numbers 67, 71, and 31 that equals 169.
273
+ I will start by trying different combinations of these three numbers and their possible operations (+, -, *, /).
274
+ Then I will evaluate each combination and check if they equal 169. If not, I will move on to the next combination until I find one that works.
275
+ After finding the correct combination, I will present my solution in the "<answer>
276
+ (your answer)
277
+ </answer>" format.
278
+ Here's my detailed plan:
279
+
280
+ 1. Create all possible combinations of 67, 71, and 31 using addition, subtraction, multiplication, and division.
281
+ 2. Evaluate each combination to see if it equals 169.
282
+ 3. If no combination equals 169, repeat the process with new combinations until I find one that works.
283
+ 4. Once I have found the correct combination, express it as a single equation in the "<answer>
284
+ (your answer)
285
+ </answer>" format.
286
+
287
+ Now let me begin searching for the correct combination!
288
+ </sample>
289
+ <reflect>
290
+ Let us verify this answer:
291
+
292
+ 1. Step 1: 67 * 31 = 2077
293
+ 2. Step 2: 2077 - 71 = 2006
294
+
295
+ Therefore, the final result is 2006, which does not equal 169. The answer is incorrect.
296
+
297
+ <verdict>
298
+ Incorrect
299
+ </verdict>
300
+ </reflect>
301
+ But wait, let me think about it again.
302
+ <sample>
303
+ Rewritten Question:
304
+
305
+ What is the result when you add two numbers from the list [67, 71, 31] together?
306
+
307
+ Step 1: Add 67 and 31.
308
+ Step 2: Take the sum from Step 1 and add 71.
309
+
310
+ Final Equation: (67 + 31) + 71
311
+
312
+ <answer>
313
+ (67 + 31) + 71
314
+ </answer>
315
+ </sample>
316
+ <reflect>
317
+ Let us verify this answer: First, 67 + 31 = 98, then 98 + 71 = 169. The answer is correct.
318
+
319
+ <verdict>
320
+ Correct
321
+ </verdict>
322
+ </reflect>
323
+ </think>
324
+
325
+
326
+ Hence, the correct answer is:
327
+
328
+ <answer>
329
+ (67 + 31) + 71
330
+ </answer><|endoftext|>
331
+
332
+ label_ids:
333
+ [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 13708, 766, 397, 27, 13611, 397, 5338, 11, 358, 3278, 1430, 311, 3535, 279, 3491, 2664, 553, 4378, 700, 264, 3119, 323, 728, 2167, 5538, 1119, 7716, 911, 1246, 358, 1265, 11625, 419, 624, 8304, 220, 16, 25, 64547, 279, 5795, 481, 1205, 1184, 311, 1855, 458, 23606, 1667, 279, 5109, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 60, 429, 16819, 220, 16, 21, 24, 624, 8304, 220, 17, 25, 21144, 3204, 7525, 481, 78646, 11, 75240, 11, 46444, 11, 323, 12804, 624, 8304, 220, 18, 25, 9735, 2155, 27459, 315, 1493, 7525, 448, 279, 2661, 5109, 624, 8304, 220, 19, 25, 3393, 5257, 37906, 3080, 582, 1477, 825, 429, 16819, 220, 16, 21, 24, 624, 8304, 220, 20, 25, 9646, 582, 614, 1730, 279, 4396, 23606, 11, 3042, 432, 304, 279, 5189, 3561, 624, 6025, 2087, 1526, 678, 1493, 7354, 11, 1588, 594, 279, 1590, 23606, 429, 16819, 220, 16, 21, 24, 1447, 27, 9217, 397, 7, 21, 22, 353, 220, 17, 8, 488, 220, 22, 16, 481, 220, 18, 16, 198, 522, 9217, 397, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 1447, 5338, 11, 220, 21, 22, 488, 220, 22, 16, 284, 220, 16, 18, 23, 11, 1221, 220, 16, 18, 23, 481, 220, 18, 16, 284, 220, 16, 15, 22, 13, 1096, 1558, 537, 6144, 220, 16, 21, 24, 11, 773, 279, 4226, 374, 15114, 382, 27, 423, 8477, 397, 40468, 198, 522, 423, 8477, 397, 522, 34913, 397, 10061, 752, 10146, 847, 4226, 624, 27, 13611, 397, 58465, 12153, 15846, 1447, 10253, 279, 5109, 504, 279, 2661, 738, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 60, 311, 1352, 458, 23606, 6144, 287, 220, 16, 21, 24, 1667, 6770, 34784, 7525, 17973, 11, 85922, 11777, 608, 3593, 8304, 220, 16, 25, 71059, 220, 21, 22, 323, 220, 18, 16, 624, 8304, 220, 17, 25, 2691, 220, 22, 16, 311, 279, 1102, 504, 14822, 220, 16, 624, 8304, 220, 18, 25, 576, 1590, 1102, 686, 387, 220, 16, 21, 24, 382, 27, 9217, 397, 7, 21, 22, 353, 220, 18, 16, 8, 488, 220, 22, 16, 284, 220, 16, 21, 24, 198, 522, 9217, 397, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 1447, 
5338, 11, 220, 21, 22, 353, 220, 18, 16, 284, 220, 17, 11, 15, 22, 22, 11, 1221, 220, 17, 11, 15, 22, 22, 481, 220, 22, 16, 284, 220, 17, 11, 15, 15, 21, 13, 1096, 374, 537, 6144, 311, 220, 16, 21, 24, 11, 773, 279, 4226, 374, 15114, 382, 27, 423, 8477, 397, 40468, 198, 522, 423, 8477, 397, 522, 34913, 397, 1462, 311, 3395, 847, 2033, 825, 803, 882, 624, 27, 13611, 397, 5338, 11, 358, 3278, 1430, 311, 3535, 279, 3491, 2664, 553, 4378, 700, 264, 3119, 323, 728, 2167, 5538, 1119, 7716, 911, 1246, 358, 1265, 11625, 419, 624, 785, 5795, 374, 311, 1855, 458, 23606, 1667, 279, 5109, 220, 21, 22, 11, 220, 22, 16, 11, 323, 220, 18, 16, 429, 16819, 220, 16, 21, 24, 624, 40, 686, 1191, 553, 4460, 2155, 27459, 315, 1493, 2326, 5109, 323, 862, 3204, 7525, 17973, 11, 85922, 11777, 608, 568, 715, 12209, 358, 686, 15442, 1817, 10601, 323, 1779, 421, 807, 6144, 220, 16, 21, 24, 13, 1416, 537, 11, 358, 686, 3271, 389, 311, 279, 1790, 10601, 3080, 358, 1477, 825, 429, 4278, 624, 6025, 9271, 279, 4396, 10601, 11, 358, 686, 3042, 847, 6291, 304, 279, 4055, 9217, 397, 7021, 413, 4226, 340, 522, 9217, 9877, 3561, 624, 8420, 594, 847, 11682, 3119, 1447, 16, 13, 4230, 678, 3204, 27459, 315, 220, 21, 22, 11, 220, 22, 16, 11, 323, 220, 18, 16, 1667, 5256, 11, 75240, 11, 46444, 11, 323, 12804, 624, 17, 13, 54115, 1817, 10601, 311, 1490, 421, 432, 16819, 220, 16, 21, 24, 624, 18, 13, 1416, 902, 10601, 16819, 220, 16, 21, 24, 11, 13153, 279, 1882, 448, 501, 27459, 3080, 358, 1477, 825, 429, 4278, 624, 19, 13, 9646, 358, 614, 1730, 279, 4396, 10601, 11, 3158, 432, 438, 264, 3175, 23606, 304, 279, 4055, 9217, 397, 7021, 413, 4226, 340, 522, 9217, 9877, 3561, 382, 7039, 1077, 752, 3161, 15039, 369, 279, 4396, 10601, 4894, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 1447, 16, 13, 14822, 220, 16, 25, 220, 21, 22, 353, 220, 18, 16, 284, 220, 17, 15, 22, 22, 198, 17, 13, 14822, 220, 17, 25, 220, 17, 15, 22, 22, 481, 220, 22, 16, 284, 220, 17, 15, 15, 21, 271, 54815, 11, 279, 1590, 1102, 374, 220, 17, 15, 15, 21, 11, 892, 1558, 537, 6144, 220, 16, 21, 24, 13, 576, 4226, 374, 15114, 382, 27, 423, 8477, 397, 40468, 198, 522, 423, 8477, 397, 522, 34913, 397, 3983, 3783, 11, 1077, 752, 1744, 911, 432, 1549, 624, 27, 13611, 397, 58465, 12153, 15846, 1447, 3838, 374, 279, 1102, 979, 498, 912, 1378, 5109, 504, 279, 1140, 508, 21, 22, 11, 220, 22, 16, 11, 220, 18, 16, 60, 3786, 1939, 8304, 220, 16, 25, 2691, 220, 21, 22, 323, 220, 18, 16, 624, 8304, 220, 17, 25, 11778, 279, 2629, 504, 14822, 220, 16, 323, 912, 220, 22, 16, 382, 19357, 84670, 25, 320, 21, 22, 488, 220, 18, 16, 8, 488, 220, 22, 16, 271, 27, 9217, 397, 7, 21, 22, 488, 220, 18, 16, 8, 488, 220, 22, 16, 198, 522, 9217, 397, 522, 13611, 397, 27, 34913, 397, 10061, 601, 10146, 419, 4226, 25, 5512, 11, 220, 21, 22, 488, 220, 18, 16, 284, 220, 24, 23, 11, 1221, 220, 24, 23, 488, 220, 22, 16, 284, 220, 16, 21, 24, 13, 576, 4226, 374, 4396, 382, 27, 423, 8477, 397, 33092, 198, 522, 423, 8477, 397, 522, 34913, 397, 522, 26865, 10370, 39, 763, 11, 279, 4396, 4226, 374, 1447, 27, 9217, 397, 7, 21, 22, 488, 220, 18, 16, 8, 488, 220, 22, 16, 198, 522, 9217, 29, 151643, 198]
334
+ labels:
335
+ <think>
336
+ <sample>
337
+ First, I'll try to understand the problem better by writing out a plan and go really deep into detail about how I should solve this.
338
+ Step 1: Identify the goal - We need to create an equation using the numbers [67, 71, 31] that equals 169.
339
+ Step 2: Consider possible operations - Addition, subtraction, multiplication, and division.
340
+ Step 3: Try different combinations of these operations with the given numbers.
341
+ Step 4: Test various equations until we find one that equals 169.
342
+ Step 5: Once we have found the correct equation, present it in the specified format.
343
+ After going through all these steps, here's the final equation that equals 169:
344
+
345
+ <answer>
346
+ (67 * 2) + 71 - 31
347
+ </answer>
348
+ </sample>
349
+ <reflect>
350
+ Let us verify this answer:
351
+
352
+ First, 67 + 71 = 138, then 138 - 31 = 107. This does not equal 169, so the answer is incorrect.
353
+
354
+ <verdict>
355
+ Incorrect
356
+ </verdict>
357
+ </reflect>
358
+ Let me verify my answer.
359
+ <sample>
360
+ Rewritten Question:
361
+
362
+ Use the numbers from the given set [67, 71, 31] to form an equation equaling 169 using basic arithmetic operations (+, -, *, /).
363
+
364
+ Step 1: Multiply 67 and 31.
365
+ Step 2: Add 71 to the result from Step 1.
366
+ Step 3: The final result will be 169.
367
+
368
+ <answer>
369
+ (67 * 31) + 71 = 169
370
+ </answer>
371
+ </sample>
372
+ <reflect>
373
+ Let us verify this answer:
374
+
375
+ First, 67 * 31 = 2,077, then 2,077 - 71 = 2,006. This is not equal to 169, so the answer is incorrect.
376
+
377
+ <verdict>
378
+ Incorrect
379
+ </verdict>
380
+ </reflect>
381
+ Time to review my response one more time.
382
+ <sample>
383
+ First, I'll try to understand the problem better by writing out a plan and go really deep into detail about how I should solve this.
384
+ The goal is to create an equation using the numbers 67, 71, and 31 that equals 169.
385
+ I will start by trying different combinations of these three numbers and their possible operations (+, -, *, /).
386
+ Then I will evaluate each combination and check if they equal 169. If not, I will move on to the next combination until I find one that works.
387
+ After finding the correct combination, I will present my solution in the "<answer>
388
+ (your answer)
389
+ </answer>" format.
390
+ Here's my detailed plan:
391
+
392
+ 1. Create all possible combinations of 67, 71, and 31 using addition, subtraction, multiplication, and division.
393
+ 2. Evaluate each combination to see if it equals 169.
394
+ 3. If no combination equals 169, repeat the process with new combinations until I find one that works.
395
+ 4. Once I have found the correct combination, express it as a single equation in the "<answer>
396
+ (your answer)
397
+ </answer>" format.
398
+
399
+ Now let me begin searching for the correct combination!
400
+ </sample>
401
+ <reflect>
402
+ Let us verify this answer:
403
+
404
+ 1. Step 1: 67 * 31 = 2077
405
+ 2. Step 2: 2077 - 71 = 2006
406
+
407
+ Therefore, the final result is 2006, which does not equal 169. The answer is incorrect.
408
+
409
+ <verdict>
410
+ Incorrect
411
+ </verdict>
412
+ </reflect>
413
+ But wait, let me think about it again.
414
+ <sample>
415
+ Rewritten Question:
416
+
417
+ What is the result when you add two numbers from the list [67, 71, 31] together?
418
+
419
+ Step 1: Add 67 and 31.
420
+ Step 2: Take the sum from Step 1 and add 71.
421
+
422
+ Final Equation: (67 + 31) + 71
423
+
424
+ <answer>
425
+ (67 + 31) + 71
426
+ </answer>
427
+ </sample>
428
+ <reflect>
429
+ Let us verify this answer: First, 67 + 31 = 98, then 98 + 71 = 169. The answer is correct.
430
+
431
+ <verdict>
432
+ Correct
433
+ </verdict>
434
+ </reflect>
435
+ </think>
436
+
437
+
438
+ Hence, the correct answer is:
439
+
440
+ <answer>
441
+ (67 + 31) + 71
442
+ </answer><|endoftext|>
443
+
444
+ [INFO|configuration_utils.py:765] 2025-10-22 16:15:31,324 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
445
+ [INFO|configuration_utils.py:839] 2025-10-22 16:15:31,325 >> Model config Qwen2Config {
446
+ "architectures": [
447
+ "Qwen2ForCausalLM"
448
+ ],
449
+ "attention_dropout": 0.0,
450
+ "bos_token_id": 151643,
451
+ "dtype": "bfloat16",
452
+ "eos_token_id": 151643,
453
+ "hidden_act": "silu",
454
+ "hidden_size": 896,
455
+ "initializer_range": 0.02,
456
+ "intermediate_size": 4864,
457
+ "layer_types": [
458
+ "full_attention",
459
+ "full_attention",
460
+ "full_attention",
461
+ "full_attention",
462
+ "full_attention",
463
+ "full_attention",
464
+ "full_attention",
465
+ "full_attention",
466
+ "full_attention",
467
+ "full_attention",
468
+ "full_attention",
469
+ "full_attention",
470
+ "full_attention",
471
+ "full_attention",
472
+ "full_attention",
473
+ "full_attention",
474
+ "full_attention",
475
+ "full_attention",
476
+ "full_attention",
477
+ "full_attention",
478
+ "full_attention",
479
+ "full_attention",
480
+ "full_attention",
481
+ "full_attention"
482
+ ],
483
+ "max_position_embeddings": 32768,
484
+ "max_window_layers": 24,
485
+ "model_type": "qwen2",
486
+ "num_attention_heads": 14,
487
+ "num_hidden_layers": 24,
488
+ "num_key_value_heads": 2,
489
+ "rms_norm_eps": 1e-06,
490
+ "rope_scaling": null,
491
+ "rope_theta": 1000000.0,
492
+ "sliding_window": null,
493
+ "tie_word_embeddings": true,
494
+ "transformers_version": "4.57.1",
495
+ "use_cache": true,
496
+ "use_mrope": false,
497
+ "use_sliding_window": false,
498
+ "vocab_size": 151936
499
+ }
500
+
501
+ [INFO|2025-10-22 16:15:31] llamafactory.model.model_utils.kv_cache:143 >> KV cache is disabled during training.
502
+ `torch_dtype` is deprecated! Use `dtype` instead!
503
+ [WARNING|logging.py:328] 2025-10-22 16:15:31,920 >> `torch_dtype` is deprecated! Use `dtype` instead!
504
+ [INFO|modeling_utils.py:1172] 2025-10-22 16:15:31,920 >> loading weights file model.safetensors from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/model.safetensors
505
+ [INFO|modeling_utils.py:2341] 2025-10-22 16:15:31,921 >> Instantiating Qwen2ForCausalLM model under default dtype torch.float16.
506
+ [INFO|configuration_utils.py:986] 2025-10-22 16:15:31,922 >> Generate config GenerationConfig {
507
+ "bos_token_id": 151643,
508
+ "eos_token_id": 151643,
509
+ "use_cache": false
510
+ }
511
+
512
+ [INFO|configuration_utils.py:941] 2025-10-22 16:15:32,220 >> loading configuration file generation_config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/generation_config.json
513
+ [INFO|configuration_utils.py:986] 2025-10-22 16:15:32,220 >> Generate config GenerationConfig {
514
+ "bos_token_id": 151643,
515
+ "eos_token_id": 151643,
516
+ "max_new_tokens": 2048
517
+ }
518
+
519
+ [INFO|dynamic_module_utils.py:423] 2025-10-22 16:15:32,250 >> Could not locate the custom_generate/generate.py inside Qwen/Qwen2.5-0.5B.
520
+ [INFO|2025-10-22 16:15:32] llamafactory.model.model_utils.checkpointing:143 >> Gradient checkpointing enabled.
521
+ [INFO|2025-10-22 16:15:32] llamafactory.model.model_utils.attention:143 >> Using torch SDPA for faster training and inference.
522
+ [INFO|2025-10-22 16:15:32] llamafactory.model.adapter:143 >> Upcasting trainable params to float32.
523
+ [INFO|2025-10-22 16:15:32] llamafactory.model.adapter:143 >> Fine-tuning method: LoRA
524
+ [INFO|2025-10-22 16:15:32] llamafactory.model.model_utils.misc:143 >> Found linear modules: up_proj,down_proj,k_proj,v_proj,o_proj,q_proj,gate_proj
525
+ [INFO|2025-10-22 16:15:32] llamafactory.model.loader:143 >> trainable params: 4,399,104 || all params: 498,431,872 || trainable%: 0.8826
526
+ [WARNING|trainer.py:906] 2025-10-22 16:15:32,491 >> The model is already on multiple devices. Skipping the move to device specified in `args`.
527
+ [INFO|trainer.py:699] 2025-10-22 16:15:32,493 >> max_steps is given, it will override any value given in num_train_epochs
528
+ [INFO|trainer.py:749] 2025-10-22 16:15:32,494 >> Using auto half precision backend
529
+ [WARNING|trainer.py:982] 2025-10-22 16:15:32,495 >> The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config. The model config and generation config were aligned accordingly, being updated with the tokenizer's values. Updated tokens: {'bos_token_id': None, 'pad_token_id': 151643}.
530
+ The model is already on multiple devices. Skipping the move to device specified in `args`.
531
+ The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config. The model config and generation config were aligned accordingly, being updated with the tokenizer's values. Updated tokens: {'bos_token_id': None, 'pad_token_id': 151643}.
532
+ [INFO|trainer.py:2519] 2025-10-22 16:15:32,991 >> ***** Running training *****
533
+ [INFO|trainer.py:2520] 2025-10-22 16:15:32,991 >> Num examples = 48,600
534
+ [INFO|trainer.py:2521] 2025-10-22 16:15:32,991 >> Num Epochs = 1
535
+ [INFO|trainer.py:2522] 2025-10-22 16:15:32,991 >> Instantaneous batch size per device = 1
536
+ [INFO|trainer.py:2525] 2025-10-22 16:15:32,991 >> Total train batch size (w. parallel, distributed & accumulation) = 2
537
+ [INFO|trainer.py:2526] 2025-10-22 16:15:32,991 >> Gradient Accumulation steps = 1
538
+ [INFO|trainer.py:2527] 2025-10-22 16:15:32,991 >> Total optimization steps = 100
539
+ [INFO|trainer.py:2528] 2025-10-22 16:15:32,993 >> Number of trainable parameters = 4,399,104
540
+ [INFO|integration_utils.py:867] 2025-10-22 16:15:33,007 >> Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"
541
+ wandb: Currently logged in as: zsprague (ut_nlp_deduce) to https://api.wandb.ai. Use `wandb login --relogin` to force relogin
542
+ wandb: Tracking run with wandb version 0.22.2
543
+ wandb: Run data is saved locally in /scratch/zrs2020/LlamaFactoryHelper/wandb/run-20251022_161533-ad6e8c8u
544
+ wandb: Run `wandb offline` to turn off syncing.
545
+ wandb: Syncing run local_test
546
+ wandb: View project at https://wandb.ai/ut_nlp_deduce/llamafactory
547
+ wandb: View run at https://wandb.ai/ut_nlp_deduce/llamafactory/runs/ad6e8c8u
548
+ 0%| | 0/100 [00:00<?, ?it/s] 1%| | 1/100 [00:00<01:05, 1.50it/s] 2%| | 2/100 [00:00<00:36, 2.70it/s] 3%| | 3/100 [00:00<00:25, 3.87it/s] 4%| | 4/100 [00:01<00:21, 4.54it/s] 5%| | 5/100 [00:01<00:18, 5.05it/s] 6%| | 6/100 [00:01<00:17, 5.47it/s] 7%| | 7/100 [00:01<00:23, 4.03it/s] 8%| | 8/100 [00:01<00:20, 4.50it/s] 9%| | 9/100 [00:02<00:21, 4.14it/s] 10%| | 10/100 [00:02<00:21, 4.12it/s] {'loss': 0.8368, 'grad_norm': 0.47585323452949524, 'learning_rate': 4.55e-05, 'epoch': 0.0}
549
+ 10%| | 10/100 [00:02<00:21, 4.12it/s] 11%| | 11/100 [00:02<00:19, 4.62it/s] 12%| | 12/100 [00:03<00:27, 3.14it/s] 13%| | 13/100 [00:03<00:24, 3.48it/s] 14%| | 14/100 [00:03<00:23, 3.60it/s] 15%| | 15/100 [00:03<00:21, 3.88it/s] 16%| | 16/100 [00:04<00:18, 4.54it/s] 17%| | 17/100 [00:04<00:16, 5.05it/s] 18%| | 18/100 [00:04<00:21, 3.74it/s] 19%| | 19/100 [00:04<00:19, 4.16it/s] 20%| | 20/100 [00:04<00:16, 4.80it/s] {'loss': 0.7707, 'grad_norm': 0.6054973602294922, 'learning_rate': 4.05e-05, 'epoch': 0.0}
550
+ 20%| | 20/100 [00:04<00:16, 4.80it/s] 21%| | 21/100 [00:05<00:15, 5.24it/s] 22%| | 22/100 [00:05<00:15, 5.06it/s] 23%| | 23/100 [00:05<00:17, 4.52it/s] 24%| | 24/100 [00:06<00:28, 2.71it/s] 25%| | 25/100 [00:06<00:22, 3.29it/s] 26%| | 26/100 [00:06<00:18, 3.95it/s] 27%| | 27/100 [00:06<00:16, 4.46it/s] 28%| | 28/100 [00:06<00:14, 5.10it/s] 29%| | 29/100 [00:07<00:13, 5.26it/s] 30%| | 30/100 [00:07<00:12, 5.82it/s] {'loss': 0.7068, 'grad_norm': 0.5483217239379883, 'learning_rate': 3.55e-05, 'epoch': 0.0}
551
+ 30%| | 30/100 [00:07<00:12, 5.82it/s] 31%| | 31/100 [00:07<00:15, 4.54it/s] 32%| | 32/100 [00:07<00:14, 4.80it/s] 33%| | 33/100 [00:07<00:12, 5.15it/s] 34%| | 34/100 [00:08<00:14, 4.62it/s] 35%| | 35/100 [00:08<00:12, 5.28it/s] 36%| | 36/100 [00:08<00:13, 4.73it/s] 37%| | 37/100 [00:08<00:14, 4.42it/s] 38%| | 38/100 [00:09<00:14, 4.16it/s] 39%| | 39/100 [00:09<00:14, 4.31it/s] 40%| | 40/100 [00:09<00:12, 4.75it/s] {'loss': 0.7474, 'grad_norm': 0.6295598745346069, 'learning_rate': 3.05e-05, 'epoch': 0.0}
552
+ 40%| | 40/100 [00:09<00:12, 4.75it/s] 41%| | 41/100 [00:09<00:10, 5.37it/s] 42%| | 42/100 [00:09<00:11, 4.84it/s] 43%| | 43/100 [00:09<00:11, 4.81it/s] 44%| | 44/100 [00:10<00:11, 5.05it/s] 45%| | 45/100 [00:10<00:10, 5.48it/s] 46%| | 46/100 [00:10<00:11, 4.85it/s] 47%| | 47/100 [00:10<00:10, 4.97it/s] 48%| | 48/100 [00:10<00:09, 5.54it/s] 49%| | 49/100 [00:11<00:08, 5.72it/s] 50%| | 50/100 [00:11<00:10, 4.70it/s] {'loss': 0.6857, 'grad_norm': 0.494174599647522, 'learning_rate': 2.5500000000000003e-05, 'epoch': 0.0}
553
+ 50%| | 50/100 [00:11<00:10, 4.70it/s][INFO|trainer.py:4309] 2025-10-22 16:15:45,468 >> Saving model checkpoint to /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-50
554
+ [INFO|configuration_utils.py:765] 2025-10-22 16:15:45,645 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
555
+ [INFO|configuration_utils.py:839] 2025-10-22 16:15:45,646 >> Model config Qwen2Config {
556
+ "architectures": [
557
+ "Qwen2ForCausalLM"
558
+ ],
559
+ "attention_dropout": 0.0,
560
+ "bos_token_id": 151643,
561
+ "dtype": "bfloat16",
562
+ "eos_token_id": 151643,
563
+ "hidden_act": "silu",
564
+ "hidden_size": 896,
565
+ "initializer_range": 0.02,
566
+ "intermediate_size": 4864,
567
+ "layer_types": [
568
+ "full_attention",
569
+ "full_attention",
570
+ "full_attention",
571
+ "full_attention",
572
+ "full_attention",
573
+ "full_attention",
574
+ "full_attention",
575
+ "full_attention",
576
+ "full_attention",
577
+ "full_attention",
578
+ "full_attention",
579
+ "full_attention",
580
+ "full_attention",
581
+ "full_attention",
582
+ "full_attention",
583
+ "full_attention",
584
+ "full_attention",
585
+ "full_attention",
586
+ "full_attention",
587
+ "full_attention",
588
+ "full_attention",
589
+ "full_attention",
590
+ "full_attention",
591
+ "full_attention"
592
+ ],
593
+ "max_position_embeddings": 32768,
594
+ "max_window_layers": 24,
595
+ "model_type": "qwen2",
596
+ "num_attention_heads": 14,
597
+ "num_hidden_layers": 24,
598
+ "num_key_value_heads": 2,
599
+ "rms_norm_eps": 1e-06,
600
+ "rope_scaling": null,
601
+ "rope_theta": 1000000.0,
602
+ "sliding_window": null,
603
+ "tie_word_embeddings": true,
604
+ "transformers_version": "4.57.1",
605
+ "use_cache": true,
606
+ "use_mrope": false,
607
+ "use_sliding_window": false,
608
+ "vocab_size": 151936
609
+ }
610
+
611
+ [INFO|tokenization_utils_base.py:2421] 2025-10-22 16:15:45,781 >> chat template saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-50/chat_template.jinja
612
+ [INFO|tokenization_utils_base.py:2590] 2025-10-22 16:15:45,785 >> tokenizer config file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-50/tokenizer_config.json
613
+ [INFO|tokenization_utils_base.py:2599] 2025-10-22 16:15:45,789 >> Special tokens file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-50/special_tokens_map.json
614
+ 51%| | 51/100 [00:12<00:22, 2.16it/s] 52%| | 52/100 [00:12<00:17, 2.77it/s] 53%| | 53/100 [00:12<00:14, 3.27it/s] 54%| | 54/100 [00:12<00:13, 3.43it/s] 55%| | 55/100 [00:13<00:16, 2.77it/s] 56%| | 56/100 [00:13<00:14, 3.02it/s] 57%| | 57/100 [00:13<00:11, 3.65it/s] 58%| | 58/100 [00:14<00:11, 3.75it/s] 59%| | 59/100 [00:14<00:10, 3.84it/s] 60%| | 60/100 [00:14<00:09, 4.40it/s] {'loss': 0.7075, 'grad_norm': 0.6808434128761292, 'learning_rate': 2.05e-05, 'epoch': 0.0}
615
+ 60%| | 60/100 [00:14<00:09, 4.40it/s] 61%| | 61/100 [00:14<00:09, 4.16it/s] 62%| | 62/100 [00:15<00:08, 4.34it/s] 63%| | 63/100 [00:15<00:08, 4.43it/s] 64%| | 64/100 [00:15<00:07, 4.95it/s] 65%| | 65/100 [00:15<00:06, 5.14it/s] 66%| | 66/100 [00:15<00:06, 5.34it/s] 67%| | 67/100 [00:15<00:05, 5.91it/s] 68%| | 68/100 [00:16<00:05, 6.02it/s] 69%| | 69/100 [00:16<00:05, 5.19it/s] 70%| | 70/100 [00:16<00:05, 5.72it/s] {'loss': 0.7249, 'grad_norm': 0.7938493490219116, 'learning_rate': 1.55e-05, 'epoch': 0.0}
616
+ 70%| | 70/100 [00:16<00:05, 5.72it/s] 71%| | 71/100 [00:16<00:04, 5.89it/s] 72%| | 72/100 [00:16<00:04, 6.33it/s] 73%| | 73/100 [00:16<00:05, 5.39it/s] 74%| | 74/100 [00:17<00:04, 5.76it/s] 75%| | 75/100 [00:17<00:04, 5.81it/s] 76%| | 76/100 [00:17<00:04, 5.37it/s] 77%| | 77/100 [00:17<00:04, 5.64it/s] 78%| | 78/100 [00:17<00:03, 5.71it/s] 79%| | 79/100 [00:18<00:04, 4.97it/s] 80%| | 80/100 [00:18<00:03, 5.15it/s] {'loss': 0.6339, 'grad_norm': 0.5036342740058899, 'learning_rate': 1.05e-05, 'epoch': 0.0}
617
+ 80%| | 80/100 [00:18<00:03, 5.15it/s] 81%| | 81/100 [00:18<00:04, 4.63it/s] 82%| | 82/100 [00:18<00:04, 4.13it/s] 83%| | 83/100 [00:19<00:04, 4.07it/s] 84%| | 84/100 [00:19<00:03, 4.56it/s] 85%| | 85/100 [00:19<00:02, 5.21it/s] 86%| | 86/100 [00:19<00:02, 5.53it/s] 87%| | 87/100 [00:19<00:02, 5.82it/s] 88%| | 88/100 [00:19<00:02, 5.82it/s] 89%| | 89/100 [00:19<00:01, 6.03it/s] 90%| | 90/100 [00:20<00:01, 6.55it/s] {'loss': 0.637, 'grad_norm': 1.079582929611206, 'learning_rate': 5.500000000000001e-06, 'epoch': 0.0}
618
+ 90%| | 90/100 [00:20<00:01, 6.55it/s] 91%| | 91/100 [00:20<00:01, 6.35it/s] 92%|| 92/100 [00:20<00:01, 5.54it/s] 93%|| 93/100 [00:20<00:01, 5.21it/s] 94%|| 94/100 [00:20<00:01, 5.76it/s] 95%|| 95/100 [00:21<00:00, 5.79it/s] 96%|| 96/100 [00:21<00:00, 6.29it/s] 97%|| 97/100 [00:21<00:00, 5.01it/s] 98%|| 98/100 [00:21<00:00, 5.24it/s] 99%|| 99/100 [00:21<00:00, 5.82it/s]100%|| 100/100 [00:21<00:00, 5.32it/s] {'loss': 0.6089, 'grad_norm': 0.6479243636131287, 'learning_rate': 5.000000000000001e-07, 'epoch': 0.0}
619
+ 100%|| 100/100 [00:21<00:00, 5.32it/s][INFO|trainer.py:4309] 2025-10-22 16:15:56,071 >> Saving model checkpoint to /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
620
+ [INFO|configuration_utils.py:765] 2025-10-22 16:15:56,281 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
621
+ [INFO|configuration_utils.py:839] 2025-10-22 16:15:56,283 >> Model config Qwen2Config {
622
+ "architectures": [
623
+ "Qwen2ForCausalLM"
624
+ ],
625
+ "attention_dropout": 0.0,
626
+ "bos_token_id": 151643,
627
+ "dtype": "bfloat16",
628
+ "eos_token_id": 151643,
629
+ "hidden_act": "silu",
630
+ "hidden_size": 896,
631
+ "initializer_range": 0.02,
632
+ "intermediate_size": 4864,
633
+ "layer_types": [
634
+ "full_attention",
635
+ "full_attention",
636
+ "full_attention",
637
+ "full_attention",
638
+ "full_attention",
639
+ "full_attention",
640
+ "full_attention",
641
+ "full_attention",
642
+ "full_attention",
643
+ "full_attention",
644
+ "full_attention",
645
+ "full_attention",
646
+ "full_attention",
647
+ "full_attention",
648
+ "full_attention",
649
+ "full_attention",
650
+ "full_attention",
651
+ "full_attention",
652
+ "full_attention",
653
+ "full_attention",
654
+ "full_attention",
655
+ "full_attention",
656
+ "full_attention",
657
+ "full_attention"
658
+ ],
659
+ "max_position_embeddings": 32768,
660
+ "max_window_layers": 24,
661
+ "model_type": "qwen2",
662
+ "num_attention_heads": 14,
663
+ "num_hidden_layers": 24,
664
+ "num_key_value_heads": 2,
665
+ "rms_norm_eps": 1e-06,
666
+ "rope_scaling": null,
667
+ "rope_theta": 1000000.0,
668
+ "sliding_window": null,
669
+ "tie_word_embeddings": true,
670
+ "transformers_version": "4.57.1",
671
+ "use_cache": true,
672
+ "use_mrope": false,
673
+ "use_sliding_window": false,
674
+ "vocab_size": 151936
675
+ }
676
+
677
+ [INFO|tokenization_utils_base.py:2421] 2025-10-22 16:15:56,435 >> chat template saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100/chat_template.jinja
678
+ [INFO|tokenization_utils_base.py:2590] 2025-10-22 16:15:56,440 >> tokenizer config file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100/tokenizer_config.json
679
+ [INFO|tokenization_utils_base.py:2599] 2025-10-22 16:15:56,444 >> Special tokens file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100/special_tokens_map.json
680
+ [INFO|trainer.py:2810] 2025-10-22 16:15:56,947 >>
681
+
682
+ Training completed. Do not forget to share your model on huggingface.co/models =)
683
+
684
+
685
+ {'train_runtime': 23.9548, 'train_samples_per_second': 8.349, 'train_steps_per_second': 4.175, 'train_loss': 0.7059538984298706, 'epoch': 0.0}
686
+ 100%|| 100/100 [00:22<00:00, 5.32it/s]100%|| 100/100 [00:22<00:00, 4.37it/s]
687
+ [INFO|trainer.py:4309] 2025-10-22 16:15:56,958 >> Saving model checkpoint to /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints
688
+ [INFO|configuration_utils.py:765] 2025-10-22 16:15:57,075 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
689
+ [INFO|configuration_utils.py:839] 2025-10-22 16:15:57,076 >> Model config Qwen2Config {
690
+ "architectures": [
691
+ "Qwen2ForCausalLM"
692
+ ],
693
+ "attention_dropout": 0.0,
694
+ "bos_token_id": 151643,
695
+ "dtype": "bfloat16",
696
+ "eos_token_id": 151643,
697
+ "hidden_act": "silu",
698
+ "hidden_size": 896,
699
+ "initializer_range": 0.02,
700
+ "intermediate_size": 4864,
701
+ "layer_types": [
702
+ "full_attention",
703
+ "full_attention",
704
+ "full_attention",
705
+ "full_attention",
706
+ "full_attention",
707
+ "full_attention",
708
+ "full_attention",
709
+ "full_attention",
710
+ "full_attention",
711
+ "full_attention",
712
+ "full_attention",
713
+ "full_attention",
714
+ "full_attention",
715
+ "full_attention",
716
+ "full_attention",
717
+ "full_attention",
718
+ "full_attention",
719
+ "full_attention",
720
+ "full_attention",
721
+ "full_attention",
722
+ "full_attention",
723
+ "full_attention",
724
+ "full_attention",
725
+ "full_attention"
726
+ ],
727
+ "max_position_embeddings": 32768,
728
+ "max_window_layers": 24,
729
+ "model_type": "qwen2",
730
+ "num_attention_heads": 14,
731
+ "num_hidden_layers": 24,
732
+ "num_key_value_heads": 2,
733
+ "rms_norm_eps": 1e-06,
734
+ "rope_scaling": null,
735
+ "rope_theta": 1000000.0,
736
+ "sliding_window": null,
737
+ "tie_word_embeddings": true,
738
+ "transformers_version": "4.57.1",
739
+ "use_cache": true,
740
+ "use_mrope": false,
741
+ "use_sliding_window": false,
742
+ "vocab_size": 151936
743
+ }
744
+
745
+ [INFO|tokenization_utils_base.py:2421] 2025-10-22 16:15:57,214 >> chat template saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/chat_template.jinja
746
+ [INFO|tokenization_utils_base.py:2590] 2025-10-22 16:15:57,236 >> tokenizer config file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/tokenizer_config.json
747
+ [INFO|tokenization_utils_base.py:2599] 2025-10-22 16:15:57,240 >> Special tokens file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/special_tokens_map.json
748
+ ***** train metrics *****
749
+ epoch = 0.0041
750
+ total_flos = 753953GF
751
+ train_loss = 0.706
752
+ train_runtime = 0:00:23.95
753
+ train_samples_per_second = 8.349
754
+ train_steps_per_second = 4.175
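The metrics above are internally consistent: 100 steps in about 24 s is roughly 4.175 steps/s, and with the effective batch of 2 that is roughly 8.349 samples/s. A small check reusing the figures exactly as logged:

# Cross-check of the "train metrics" block; all inputs are copied from the log, nothing new is measured.
train_runtime_s = 23.9548
steps = 100
effective_batch = 2

steps_per_second = steps / train_runtime_s                 # ~4.175
samples_per_second = steps_per_second * effective_batch    # ~8.349
print(round(steps_per_second, 3), round(samples_per_second, 3))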
755
+ [INFO|modelcard.py:456] 2025-10-22 16:15:57,489 >> Dropping the following result as it does not have all the necessary fields:
756
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
757
+ gl064:2375558:2375558 [1] NCCL INFO comm 0x13fbbf20 rank 1 nranks 2 cudaDev 1 busId 59000 - Destroy COMPLETE
758
+ gl064:2375557:2375557 [0] NCCL INFO comm 0x13ebaba0 rank 0 nranks 2 cudaDev 0 busId 47000 - Destroy COMPLETE
759
+ [W1022 16:15:58.882854538 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
760
+ wandb:

761
+ wandb: View run local_test at: https://wandb.ai/ut_nlp_deduce/llamafactory/runs/ad6e8c8u
762
+ wandb: Find logs at: wandb/run-20251022_161533-ad6e8c8u/logs
763
+ [W1022 16:16:05.664131885 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
764
+ [W1022 16:16:06.115853102 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
765
+ [W1022 16:16:06.542480791 AllocatorConfig.cpp:28] Warning: PYTORCH_CUDA_ALLOC_CONF is deprecated, use PYTORCH_ALLOC_CONF instead (function operator())
766
+
767
+ ========================================
768
+ Training completed successfully
769
+ End Time: Wed Oct 22 04:16:07 PM EDT 2025
770
+ ========================================
771
+
772
+ ========================================
773
+ STAGE 2: Merging/Exporting Model
774
+ Start Time: Wed Oct 22 04:16:07 PM EDT 2025
775
+ ========================================
776
+ Looking for checkpoints in: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints
777
+ Analyzing checkpoints to find the one from current training run...
778
+ - checkpoint-100: trainer_state.json modified at Wed Oct 22 04:15:56 PM EDT 2025
779
+ - checkpoint-50: trainer_state.json modified at Wed Oct 22 04:15:46 PM EDT 2025
780
+
781
+ Selected checkpoint: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
782
+ This checkpoint has the most recently updated trainer_state.json
783
+ Checkpoint details:
784
+ Path: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
785
+ Last modified: 2025-10-22 16:15:56.944686196 -0400
786
+ Training step: 100
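The selection rule described above (take the checkpoint whose trainer_state.json was written most recently) is easy to reproduce outside the job script. A minimal sketch, assuming the usual checkpoint-<step> directory layout; pick_latest_checkpoint is an illustrative helper, not part of LlamaFactory:

from pathlib import Path

def pick_latest_checkpoint(checkpoints_dir: str) -> Path:
    # Mirror the rule in the log above: the checkpoint whose trainer_state.json was
    # modified most recently is treated as the one produced by the current run.
    candidates = [
        d for d in Path(checkpoints_dir).glob("checkpoint-*")
        if (d / "trainer_state.json").is_file()
    ]
    if not candidates:
        raise FileNotFoundError(f"no checkpoint-* directories under {checkpoints_dir}")
    return max(candidates, key=lambda d: (d / "trainer_state.json").stat().st_mtime)

# Example, using the directory printed in the log:
# pick_latest_checkpoint("/scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints")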
787
+ Updating merge config to point to checkpoint...
788
+ Successfully updated merge config
789
+ Updated merge config to use: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
790
+
791
+ Merge config contents:
792
+ model_name_or_path: Qwen/Qwen2.5-0.5B
793
+ finetuning_type: lora
794
+ trust_remote_code: true
795
+ adapter_name_or_path: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
796
+ template: default
797
+ export_dir: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged
798
+
799
+ Executing command: llamafactory-cli export /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/configs/merge_config.yaml
800
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.
801
+ warnings.warn(
802
+ /scratch/zrs2020/miniconda/miniconda3/envs/llamafactory/lib/python3.12/site-packages/jieba/_compat.py:18: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.
803
+ import pkg_resources
804
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file vocab.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/vocab.json
805
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file merges.txt from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/merges.txt
806
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file tokenizer.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer.json
807
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file added_tokens.json from cache at None
808
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file special_tokens_map.json from cache at None
809
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file tokenizer_config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer_config.json
810
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,378 >> loading file chat_template.jinja from cache at None
811
+ [INFO|tokenization_utils_base.py:2364] 2025-10-22 16:16:16,553 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
812
+ [INFO|configuration_utils.py:765] 2025-10-22 16:16:16,790 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
813
+ [INFO|configuration_utils.py:839] 2025-10-22 16:16:16,792 >> Model config Qwen2Config {
814
+ "architectures": [
815
+ "Qwen2ForCausalLM"
816
+ ],
817
+ "attention_dropout": 0.0,
818
+ "bos_token_id": 151643,
819
+ "dtype": "bfloat16",
820
+ "eos_token_id": 151643,
821
+ "hidden_act": "silu",
822
+ "hidden_size": 896,
823
+ "initializer_range": 0.02,
824
+ "intermediate_size": 4864,
825
+ "layer_types": [
826
+ "full_attention",
827
+ "full_attention",
828
+ "full_attention",
829
+ "full_attention",
830
+ "full_attention",
831
+ "full_attention",
832
+ "full_attention",
833
+ "full_attention",
834
+ "full_attention",
835
+ "full_attention",
836
+ "full_attention",
837
+ "full_attention",
838
+ "full_attention",
839
+ "full_attention",
840
+ "full_attention",
841
+ "full_attention",
842
+ "full_attention",
843
+ "full_attention",
844
+ "full_attention",
845
+ "full_attention",
846
+ "full_attention",
847
+ "full_attention",
848
+ "full_attention",
849
+ "full_attention"
850
+ ],
851
+ "max_position_embeddings": 32768,
852
+ "max_window_layers": 24,
853
+ "model_type": "qwen2",
854
+ "num_attention_heads": 14,
855
+ "num_hidden_layers": 24,
856
+ "num_key_value_heads": 2,
857
+ "rms_norm_eps": 1e-06,
858
+ "rope_scaling": null,
859
+ "rope_theta": 1000000.0,
860
+ "sliding_window": null,
861
+ "tie_word_embeddings": true,
862
+ "transformers_version": "4.57.1",
863
+ "use_cache": true,
864
+ "use_mrope": false,
865
+ "use_sliding_window": false,
866
+ "vocab_size": 151936
867
+ }
868
+
869
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file vocab.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/vocab.json
870
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file merges.txt from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/merges.txt
871
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file tokenizer.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer.json
872
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file added_tokens.json from cache at None
873
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file special_tokens_map.json from cache at None
874
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file tokenizer_config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/tokenizer_config.json
875
+ [INFO|tokenization_utils_base.py:2095] 2025-10-22 16:16:16,861 >> loading file chat_template.jinja from cache at None
876
+ [INFO|tokenization_utils_base.py:2364] 2025-10-22 16:16:17,029 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
877
+ [INFO|configuration_utils.py:765] 2025-10-22 16:16:17,076 >> loading configuration file config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/config.json
878
+ [INFO|configuration_utils.py:839] 2025-10-22 16:16:17,076 >> Model config Qwen2Config {
879
+ "architectures": [
880
+ "Qwen2ForCausalLM"
881
+ ],
882
+ "attention_dropout": 0.0,
883
+ "bos_token_id": 151643,
884
+ "dtype": "bfloat16",
885
+ "eos_token_id": 151643,
886
+ "hidden_act": "silu",
887
+ "hidden_size": 896,
888
+ "initializer_range": 0.02,
889
+ "intermediate_size": 4864,
890
+ "layer_types": [
891
+ "full_attention",
892
+ "full_attention",
893
+ "full_attention",
894
+ "full_attention",
895
+ "full_attention",
896
+ "full_attention",
897
+ "full_attention",
898
+ "full_attention",
899
+ "full_attention",
900
+ "full_attention",
901
+ "full_attention",
902
+ "full_attention",
903
+ "full_attention",
904
+ "full_attention",
905
+ "full_attention",
906
+ "full_attention",
907
+ "full_attention",
908
+ "full_attention",
909
+ "full_attention",
910
+ "full_attention",
911
+ "full_attention",
912
+ "full_attention",
913
+ "full_attention",
914
+ "full_attention"
915
+ ],
916
+ "max_position_embeddings": 32768,
917
+ "max_window_layers": 24,
918
+ "model_type": "qwen2",
919
+ "num_attention_heads": 14,
920
+ "num_hidden_layers": 24,
921
+ "num_key_value_heads": 2,
922
+ "rms_norm_eps": 1e-06,
923
+ "rope_scaling": null,
924
+ "rope_theta": 1000000.0,
925
+ "sliding_window": null,
926
+ "tie_word_embeddings": true,
927
+ "transformers_version": "4.57.1",
928
+ "use_cache": true,
929
+ "use_mrope": false,
930
+ "use_sliding_window": false,
931
+ "vocab_size": 151936
932
+ }
933
+
934
+ [WARNING|logging.py:328] 2025-10-22 16:16:17,076 >> `torch_dtype` is deprecated! Use `dtype` instead!
935
+ [INFO|2025-10-22 16:16:17] llamafactory.model.model_utils.kv_cache:143 >> KV cache is enabled for faster generation.
936
+ [WARNING|logging.py:328] 2025-10-22 16:16:17,793 >> `torch_dtype` is deprecated! Use `dtype` instead!
937
+ [INFO|modeling_utils.py:1172] 2025-10-22 16:16:17,793 >> loading weights file model.safetensors from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/model.safetensors
938
+ [INFO|modeling_utils.py:2341] 2025-10-22 16:16:17,794 >> Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
939
+ [INFO|configuration_utils.py:986] 2025-10-22 16:16:17,795 >> Generate config GenerationConfig {
940
+ "bos_token_id": 151643,
941
+ "eos_token_id": 151643
942
+ }
943
+
944
+ [INFO|configuration_utils.py:941] 2025-10-22 16:16:17,885 >> loading configuration file generation_config.json from cache at /scratch/zrs2020/.cache/hf_cache/home/hub/models--Qwen--Qwen2.5-0.5B/snapshots/060db6499f32faf8b98477b0a26969ef7d8b9987/generation_config.json
945
+ [INFO|configuration_utils.py:986] 2025-10-22 16:16:17,885 >> Generate config GenerationConfig {
946
+ "bos_token_id": 151643,
947
+ "eos_token_id": 151643,
948
+ "max_new_tokens": 2048
949
+ }
950
+
951
+ [INFO|dynamic_module_utils.py:423] 2025-10-22 16:16:17,918 >> Could not locate the custom_generate/generate.py inside Qwen/Qwen2.5-0.5B.
952
+ [INFO|2025-10-22 16:16:17] llamafactory.model.model_utils.attention:143 >> Using torch SDPA for faster training and inference.
953
+ [INFO|2025-10-22 16:16:19] llamafactory.model.adapter:143 >> Merged 1 adapter(s).
954
+ [INFO|2025-10-22 16:16:19] llamafactory.model.adapter:143 >> Loaded adapter(s): /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
955
+ [INFO|2025-10-22 16:16:19] llamafactory.model.loader:143 >> all params: 494,032,768
956
+ [INFO|2025-10-22 16:16:19] llamafactory.train.tuner:143 >> Convert model dtype to: torch.bfloat16.
957
+ [INFO|configuration_utils.py:491] 2025-10-22 16:16:19,094 >> Configuration saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/config.json
958
+ [INFO|configuration_utils.py:757] 2025-10-22 16:16:19,098 >> Configuration saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/generation_config.json
959
+ [INFO|modeling_utils.py:4181] 2025-10-22 16:16:20,738 >> Model weights saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/model.safetensors
960
+ [INFO|tokenization_utils_base.py:2421] 2025-10-22 16:16:20,743 >> chat template saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/chat_template.jinja
961
+ [INFO|tokenization_utils_base.py:2590] 2025-10-22 16:16:20,746 >> tokenizer config file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/tokenizer_config.json
962
+ [INFO|tokenization_utils_base.py:2599] 2025-10-22 16:16:20,750 >> Special tokens file saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/special_tokens_map.json
963
+ [INFO|2025-10-22 16:16:20] llamafactory.train.tuner:143 >> Ollama modelfile saved in /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged/Modelfile
964
+
965
+ ========================================
966
+ Merge/Export completed successfully
967
+ End Time: Wed Oct 22 04:16:21 PM EDT 2025
968
+ ========================================
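Once the merge stage has finished, the folder in export_dir is a standalone Hugging Face checkpoint and can be loaded directly. A minimal sketch with transformers, using the export path from the log (substitute wherever your copy of the merged folder lives):

from transformers import AutoModelForCausalLM, AutoTokenizer

merged_dir = "/scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged"  # export_dir from the log

tokenizer = AutoTokenizer.from_pretrained(merged_dir)
# `dtype=` follows the deprecation notice in the log above; older transformers versions use `torch_dtype=`.
model = AutoModelForCausalLM.from_pretrained(merged_dir, dtype="bfloat16")

inputs = tokenizer("Hello", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))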
969
+
970
+ ========================================
971
+ Preparing Training Artifacts
972
+ ========================================
973
+ Copying configuration files...
974
+ Copying and cleaning training logs...
training_artifacts/merge_config.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ model_name_or_path: Qwen/Qwen2.5-0.5B
2
+ finetuning_type: lora
3
+ trust_remote_code: true
4
+ adapter_name_or_path: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100
5
+ template: default
6
+ export_dir: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged
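This is the config that `llamafactory-cli export` consumed above: it folds the LoRA adapter from checkpoint-100 back into the Qwen2.5-0.5B base and writes the merged weights to export_dir. An equivalent merge can also be done directly with PEFT; the sketch below is an alternative route shown for illustration, not what this job ran:

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "Qwen/Qwen2.5-0.5B"
adapter_dir = "/scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints/checkpoint-100"
export_dir = "/scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/merged"

base = AutoModelForCausalLM.from_pretrained(base_id, dtype=torch.bfloat16, trust_remote_code=True)
merged = PeftModel.from_pretrained(base, adapter_dir).merge_and_unload()  # fold the LoRA deltas into the base weights
merged.save_pretrained(export_dir)
AutoTokenizer.from_pretrained(base_id).save_pretrained(export_dir)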
training_artifacts/train_config.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ stage: sft
2
+ do_train: true
3
+ max_steps: 100
4
+ do_eval: false
5
+ save_strategy: steps
6
+ save_steps: 50
7
+ logging_steps: 10
8
+ fp16: true
9
+ bf16: false
10
+ overwrite_output_dir: true
11
+ per_device_train_batch_size: 1
12
+ gradient_accumulation_steps: 1
13
+ gradient_checkpointing: true
14
+ model_name_or_path: Qwen/Qwen2.5-0.5B
15
+ finetuning_type: lora
16
+ dataset: my_custom_sft
17
+ dataset_dir: /scratch/zrs2020/LlamaFactoryHelper/LLaMA-Factory/data
18
+ template: default
19
+ cutoff_len: 8096
20
+ val_size: 0.1
21
+ lora_rank: 8
22
+ lora_alpha: 16
23
+ lora_dropout: 0.05
24
+ lora_target: all
25
+ output_dir: /scratch/zrs2020/LlamaFactoryHelper/experiments/lf_torch_test__local/checkpoints
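With lora_rank 8 and lora_target all, the adapter wraps the attention and MLP projections of all 24 Qwen2.5-0.5B blocks (hidden 896, intermediate 4864, 2 KV heads of head dim 64, per the config dumps in the log). Assuming `all` targets the seven linear projections per block and that LoRA adds r*(d_in + d_out) parameters per wrapped projection, the count reproduces the 4,399,104 trainable parameters reported at the start of training:

# Reproduce "Number of trainable parameters = 4,399,104" from the training log,
# assuming lora_target "all" wraps q/k/v/o and the three MLP projections in every layer.
r = 8
hidden = 896           # hidden_size from the Qwen2 config dump above
inter = 4864           # intermediate_size
kv_dim = 2 * 64        # num_key_value_heads * head_dim (896 / 14 heads = 64)
layers = 24

def lora_params(d_in, d_out, rank=r):
    return rank * (d_in + d_out)   # A: d_in x r, B: r x d_out

per_layer = (
    lora_params(hidden, hidden)    # q_proj
    + lora_params(hidden, kv_dim)  # k_proj
    + lora_params(hidden, kv_dim)  # v_proj
    + lora_params(hidden, hidden)  # o_proj
    + lora_params(hidden, inter)   # gate_proj
    + lora_params(hidden, inter)   # up_proj
    + lora_params(inter, hidden)   # down_proj
)
print(per_layer * layers)          # 4399104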