fif3 committed
Commit 1ecdd0e · verified · Parent: 40d2f77

Model save
README.md CHANGED
@@ -4,8 +4,8 @@ library_name: transformers
 model_name: MobileGameNPC
 tags:
 - generated_from_trainer
-- trl
 - sft
+- trl
 licence: license
 ---
 
@@ -34,11 +34,11 @@ This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.21.0
-- Transformers: 4.55.3
-- Pytorch: 2.8.0
+- TRL: 0.19.0
+- Transformers: 4.52.0.dev0
+- Pytorch: 2.7.0+cu118
 - Datasets: 4.0.0
-- Tokenizers: 0.21.4
+- Tokenizers: 0.21.2
 
 ## Citations
 
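The diff above records the exact training stack in the model card's "Framework versions" list. As a minimal sketch (not part of this repository), one way to check that a local environment matches those pins before trying to reproduce the run:

```python
# Sketch: compare installed package versions against the model card's pins.
# The pinned strings are copied from the README diff above; the check itself
# is an assumption, not part of this repository.
from importlib.metadata import PackageNotFoundError, version

expected = {
    "trl": "0.19.0",
    "transformers": "4.52.0.dev0",  # dev build, normally installed from source
    "torch": "2.7.0+cu118",
    "datasets": "4.0.0",
    "tokenizers": "0.21.2",
}

for pkg, want in expected.items():
    try:
        have = version(pkg)
    except PackageNotFoundError:
        have = "<not installed>"
    print(f"{'OK ' if have == want else 'DIFF'} {pkg}: installed={have}, card={want}")
```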
added_tokens.json CHANGED
@@ -1,3 +1,3 @@
-{
-  "<image_soft_token>": 262144
-}
+{
+  "<image_soft_token>": 262144
+}
chat_template.jinja CHANGED
@@ -1,47 +1,47 @@
-{{ bos_token }}
-{%- if messages[0]['role'] == 'system' -%}
-{%- if messages[0]['content'] is string -%}
-{%- set first_user_prefix = messages[0]['content'] + '
-
-' -%}
-{%- else -%}
-{%- set first_user_prefix = messages[0]['content'][0]['text'] + '
-
-' -%}
-{%- endif -%}
-{%- set loop_messages = messages[1:] -%}
-{%- else -%}
-{%- set first_user_prefix = "" -%}
-{%- set loop_messages = messages -%}
-{%- endif -%}
-{%- for message in loop_messages -%}
-{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
-{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
-{%- endif -%}
-{%- if (message['role'] == 'assistant') -%}
-{%- set role = "model" -%}
-{%- else -%}
-{%- set role = message['role'] -%}
-{%- endif -%}
-{{ '<start_of_turn>' + role + '
-' + (first_user_prefix if loop.first else "") }}
-{%- if message['content'] is string -%}
-{{ message['content'] | trim }}
-{%- elif message['content'] is iterable -%}
-{%- for item in message['content'] -%}
-{%- if item['type'] == 'image' -%}
-{{ '<start_of_image>' }}
-{%- elif item['type'] == 'text' -%}
-{{ item['text'] | trim }}
-{%- endif -%}
-{%- endfor -%}
-{%- else -%}
-{{ raise_exception("Invalid content type") }}
-{%- endif -%}
-{{ '<end_of_turn>
-' }}
-{%- endfor -%}
-{%- if add_generation_prompt -%}
-{{'<start_of_turn>model
-'}}
-{%- endif -%}
+{{ bos_token }}
+{%- if messages[0]['role'] == 'system' -%}
+{%- if messages[0]['content'] is string -%}
+{%- set first_user_prefix = messages[0]['content'] + '
+
+' -%}
+{%- else -%}
+{%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+' -%}
+{%- endif -%}
+{%- set loop_messages = messages[1:] -%}
+{%- else -%}
+{%- set first_user_prefix = "" -%}
+{%- set loop_messages = messages -%}
+{%- endif -%}
+{%- for message in loop_messages -%}
+{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+{%- endif -%}
+{%- if (message['role'] == 'assistant') -%}
+{%- set role = "model" -%}
+{%- else -%}
+{%- set role = message['role'] -%}
+{%- endif -%}
+{{ '<start_of_turn>' + role + '
+' + (first_user_prefix if loop.first else "") }}
+{%- if message['content'] is string -%}
+{{ message['content'] | trim }}
+{%- elif message['content'] is iterable -%}
+{%- for item in message['content'] -%}
+{%- if item['type'] == 'image' -%}
+{{ '<start_of_image>' }}
+{%- elif item['type'] == 'text' -%}
+{{ item['text'] | trim }}
+{%- endif -%}
+{%- endfor -%}
+{%- else -%}
+{{ raise_exception("Invalid content type") }}
+{%- endif -%}
+{{ '<end_of_turn>
+' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+{{'<start_of_turn>model
+'}}
+{%- endif -%}
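The template above is the standard Gemma 3 chat format: a leading system message is folded into the first user turn, the assistant role is renamed to model, and every turn is wrapped in <start_of_turn>…<end_of_turn> markers. A minimal sketch of rendering it, assuming the repo id fif3/MobileGameNPC from the commit header:

```python
# Sketch: render the chat template shipped in this commit.
# "fif3/MobileGameNPC" is assumed from the commit header; adjust as needed.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("fif3/MobileGameNPC")

messages = [
    {"role": "system", "content": "You are a grumpy shopkeeper NPC."},
    {"role": "user", "content": "Do you sell health potions?"},
]

# add_generation_prompt=True appends "<start_of_turn>model\n" so generation
# continues in the assistant ("model") role.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```

Note that the rendered prompt has no separate system block; the system text lands at the top of the first user turn.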
config.json CHANGED
@@ -1,54 +1,56 @@
-{
-  "_sliding_window_pattern": 6,
-  "architectures": [
-    "Gemma3ForCausalLM"
-  ],
-  "attention_bias": false,
-  "attention_dropout": 0.0,
-  "attn_logit_softcapping": null,
-  "bos_token_id": 2,
-  "eos_token_id": 1,
-  "final_logit_softcapping": null,
-  "head_dim": 256,
-  "hidden_activation": "gelu_pytorch_tanh",
-  "hidden_size": 640,
-  "initializer_range": 0.02,
-  "intermediate_size": 2048,
-  "layer_types": [
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "full_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "full_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "sliding_attention",
-    "full_attention"
-  ],
-  "max_position_embeddings": 32768,
-  "model_type": "gemma3_text",
-  "num_attention_heads": 4,
-  "num_hidden_layers": 18,
-  "num_key_value_heads": 1,
-  "pad_token_id": 0,
-  "query_pre_attn_scalar": 256,
-  "rms_norm_eps": 1e-06,
-  "rope_local_base_freq": 10000.0,
-  "rope_scaling": null,
-  "rope_theta": 1000000.0,
-  "sliding_window": 512,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.55.3",
-  "use_bidirectional_attention": false,
-  "use_cache": true,
-  "vocab_size": 262144
-}
+{
+  "_sliding_window_pattern": 6,
+  "architectures": [
+    "Gemma3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 640,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_types": [
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 32768,
+  "model_type": "gemma3_text",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 18,
+  "num_key_value_heads": 1,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_local_base_freq": 10000.0,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": 512,
+  "sliding_window_pattern": 6,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.0.dev0",
+  "use_bidirectional_attention": false,
+  "use_cache": true,
+  "vocab_size": 262144
+}
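Relative to the parent commit, the config gains "cache_implementation": "hybrid" and an explicit "sliding_window_pattern": 6, matching the layer_types list in which every sixth layer uses full attention and the rest use a 512-token sliding window. A minimal sketch of inspecting those fields, again assuming the fif3/MobileGameNPC repo id:

```python
# Sketch: inspect the attention layout recorded in this config.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("fif3/MobileGameNPC")  # repo id assumed

print(cfg.model_type)            # gemma3_text
print(cfg.cache_implementation)  # "hybrid": per-layer cache matching layer_types
print(cfg.sliding_window)        # 512
# Three of the 18 layers (every sixth) use full attention; the rest slide.
print(cfg.layer_types.count("full_attention"), "/", cfg.num_hidden_layers)
```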
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1cd59096eced573c6d28eb2dae76b6bd5bdd892a0819b4a721f909b179454e9
+oid sha256:5ff85627cd83b475da67f25a03c3479347cab6d359d3703648c44700c286c3f2
 size 536223056
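model.safetensors is stored as a Git LFS pointer, so the commit only swaps the sha256 oid while the 536 MB payload lives in LFS storage. A minimal sketch of verifying a downloaded copy against the new digest (the local path is hypothetical):

```python
# Sketch: verify a downloaded LFS artifact against the pointer's sha256.
# The local path is hypothetical; the expected digest is the new oid above.
import hashlib

EXPECTED = "5ff85627cd83b475da67f25a03c3479347cab6d359d3703648c44700c286c3f2"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:            # path assumed
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

print("match" if h.hexdigest() == EXPECTED else "mismatch")
```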
runs/Aug22_19-03-09_llm-gpu/events.out.tfevents.1755878605.llm-gpu.3209988.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f84e0d1cbe4a0c269ac70eb0c62bf765cc57b4ce5c27d6979aa00c584684de04
+size 14972
special_tokens_map.json CHANGED
@@ -1,33 +1,33 @@
-{
-  "boi_token": "<start_of_image>",
-  "bos_token": {
-    "content": "<bos>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eoi_token": "<end_of_image>",
-  "eos_token": {
-    "content": "<eos>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "image_token": "<image_soft_token>",
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
+{
+  "boi_token": "<start_of_image>",
+  "bos_token": {
+    "content": "<bos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eoi_token": "<end_of_image>",
+  "eos_token": {
+    "content": "<eos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "image_token": "<image_soft_token>",
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a45dc634eeadb858db45867bbe88df52aedc78e1fae69ca0fe3c36871fe731f3
+oid sha256:72822078b3a74f97e866a041c29d69bbdfedadb5a70d49c17b575207f71e402f
 size 6161