Siqi-Hu committed
Commit 0d1e7e3 · verified · 1 Parent(s): 1160ebb

Training in progress, step 40

logs/events.out.tfevents.1753121141.ly-w220.74316.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2aef4935c2fd45056b911a3e5c74967d2a12ff6d2775baa1122fa96466dca3b
+ size 8078
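
The three-line entries in this commit (this event log, plus tokenizer.model and training_args.bin below) are Git LFS pointer stubs: each records only the LFS spec version, the SHA-256 of the payload, and its size, while the binary itself lives in LFS storage. A minimal sketch of fetching the actual payload via huggingface_hub follows; the repo id is a placeholder, since the commit page does not name the repository.

# Sketch only: resolve one of the LFS pointers above to its real payload.
# The repo_id below is a hypothetical placeholder; substitute the actual repo.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="Siqi-Hu/placeholder-repo",   # assumption, not stated in the commit
    filename="logs/events.out.tfevents.1753121141.ly-w220.74316.0",
    revision="0d1e7e3",                   # the commit shown on this page
)
print(local_path)  # returns the resolved file on disk, not the pointer stub
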
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "max_length": 512,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation": true,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
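
Taken together, special_tokens_map.json, tokenizer.json, tokenizer.model and this tokenizer_config.json describe a LlamaTokenizer that prepends <s> (add_bos_token: true), does not append </s>, reuses </s> as the padding token, pads on the right, and truncates at max_length 512. A minimal loading sketch with transformers is given below; the local directory name is an assumption, standing in for wherever these files are checked out.

# Sketch only: load the tokenizer described by the files in this commit.
# "./checkpoint-40" is a hypothetical local path holding these tokenizer files.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-40")

# add_bos_token=true, add_eos_token=false: encodings start with <s> only.
ids = tok("hello world").input_ids
assert ids[0] == tok.bos_token_id

# pad_token is </s>, padding_side is "right", truncation caps at 512 tokens.
batch = tok(["short", "a slightly longer example"],
            padding=True, truncation=True, max_length=512)
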
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a79b7b1b4cd99b581e8df7da7284d9bd1b9194698357e31c0992f0c1fac967b2
+ size 5496
training_metrics.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "train_loss": [
+     5.351,
+     5.5235,
+     5.5572,
+     5.1569,
+     5.5931,
+     4.9848,
+     5.546,
+     5.3987
+   ],
+   "train_steps": [
+     5,
+     10,
+     15,
+     20,
+     25,
+     30,
+     35,
+     40
+   ],
+   "train_epochs": [
+     0.011428571428571429,
+     0.022857142857142857,
+     0.03428571428571429,
+     0.045714285714285714,
+     0.05714285714285714,
+     0.06857142857142857,
+     0.08,
+     0.09142857142857143
+   ],
+   "eval_loss": [
+     5.396810531616211,
+     5.2552690505981445
+   ],
+   "eval_steps": [
+     20,
+     40
+   ],
+   "eval_epochs": [
+     0.045714285714285714,
+     0.09142857142857143
+   ]
+ }
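
training_metrics.json mirrors the first 40 optimizer steps of this run: training loss logged every 5 steps stays between roughly 4.98 and 5.59, and evaluation loss at steps 20 and 40 moves from about 5.40 to 5.26. A small sketch for reading and plotting these curves is below; it assumes the file has been downloaded locally.

# Sketch only: plot the loss curves from training_metrics.json (local copy assumed).
import json
import matplotlib.pyplot as plt

with open("training_metrics.json") as f:
    metrics = json.load(f)

plt.plot(metrics["train_steps"], metrics["train_loss"], label="train loss")
plt.plot(metrics["eval_steps"], metrics["eval_loss"], marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_step40.png")
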