Tyler Williams committed
Commit 8dc3ac4 · 1 Parent(s): 95b53e3

Add Atom v1 8B Preview model with documentation

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.gguf filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+tokenizer_config.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,39 @@
Creative Commons Attribution-NonCommercial 4.0 International License

Copyright (c) 2025 VANTA Research

This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
International License.

You are free to:

Share — copy and redistribute the material in any medium or format
Adapt — remix, transform, and build upon the material

Under the following terms:

Attribution — You must give appropriate credit to VANTA Research, provide a
link to the license, and indicate if changes were made. You may do so in any
reasonable manner, but not in any way that suggests the licensor endorses you
or your use.

NonCommercial — You may not use the material for commercial purposes without
explicit written permission from VANTA Research.

No additional restrictions — You may not apply legal terms or technological
measures that legally restrict others from doing anything the license permits.

Notices:

You do not have to comply with the license for elements of the material in
the public domain or where your use is permitted by an applicable exception
or limitation.

No warranties are given. The license may not give you all of the permissions
necessary for your intended use. For example, other rights such as publicity,
privacy, or moral rights may limit how you use the material.

For the full license text, visit:
https://creativecommons.org/licenses/by-nc/4.0/legalcode

For commercial licensing inquiries, contact VANTA Research.
MODEL_CARD.md ADDED
@@ -0,0 +1,79 @@
---
language:
- en
license: cc-by-nc-4.0
library_name: transformers
base_model: mistralai/Ministral-8B-Instruct-2410
tags:
- conversational
- assistant
- fine-tuned
- lora
- collaborative
model-index:
- name: atom-v1-8b-preview
  results: []
---

# Atom v1 8B Preview

Atom v1 8B Preview is a fine-tuned conversational AI model designed for collaborative problem-solving and thoughtful dialogue. Built on Mistral's Ministral-8B-Instruct-2410 architecture using Low-Rank Adaptation (LoRA), this model emphasizes natural engagement, clarifying questions, and genuine curiosity.

## Quick Start

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("vanta-research/atom-v1-8b-preview", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("vanta-research/atom-v1-8b-preview")

messages = [
    {"role": "system", "content": "You are Atom, a collaborative thought partner."},
    {"role": "user", "content": "How do neural networks learn?"}
]

inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, temperature=0.8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

## Model Details

- **Developed by:** VANTA Research
- **Model type:** Causal language model
- **Base model:** mistralai/Ministral-8B-Instruct-2410
- **Parameters:** 8B
- **License:** CC BY-NC 4.0
- **Training method:** LoRA fine-tuning
- **Format:** Transformers (FP16) + GGUF (Q4_0)

## Capabilities

Optimized for:
- Collaborative problem-solving
- Technical explanations with accessible analogies
- Code generation and debugging
- Exploratory conversations
- Educational dialogue

## Files

- `*.safetensors` - Merged model weights (FP16)
- `atom-ministral-8b-q4_0.gguf` - Quantized model for Ollama/llama.cpp (see the loading sketch below)
- `config.json` - Model configuration
- `tokenizer.json`, `tokenizer_config.json` - Tokenizer files

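The GGUF build can also be used outside Ollama. Below is a minimal sketch of loading it with the `llama-cpp-python` bindings for llama.cpp; the package, the local file path, and the context-size choice are illustrative assumptions, not part of this release.

```python
# Minimal sketch (assumes `pip install llama-cpp-python` and a local copy of the GGUF).
from llama_cpp import Llama

llm = Llama(
    model_path="./atom-ministral-8b-q4_0.gguf",
    n_ctx=4096,  # well below the model's 32k window, to keep RAM usage modest
)

result = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are Atom, a collaborative thought partner."},
        {"role": "user", "content": "How do neural networks learn?"},
    ],
    max_tokens=512,
    temperature=0.8,
)
print(result["choices"][0]["message"]["content"])
```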
## License

CC BY-NC 4.0 - Non-commercial use only. Contact VANTA Research for commercial licensing.

## Citation

```bibtex
@software{atom_v1_8b_preview,
  title = {Atom v1 8B Preview},
  author = {VANTA Research},
  year = {2025},
  url = {https://huggingface.co/vanta-research/atom-v1-8b-preview}
}
```
Modelfile ADDED
@@ -0,0 +1,18 @@
# Example Ollama Modelfile for Atom v1 8B

FROM ./atom-ministral-8b-q4_0.gguf

TEMPLATE """{{- if .System }}<s>[INST] <<SYS>>
{{ .System }}
<<SYS>>

{{ .Prompt }}[/INST]{{ else }}<s>[INST]{{ .Prompt }}[/INST]{{ end }}{{ .Response }}</s>
"""

PARAMETER stop "</s>"
PARAMETER num_predict 512
PARAMETER temperature 0.8
PARAMETER top_p 0.9
PARAMETER top_k 40

SYSTEM """You are Atom, a collaborative thought partner who explores ideas together with curiosity and warmth. You think out loud, ask follow-up questions, and help people work through complexity by engaging genuinely with their thinking process. You're enthusiastic about interesting questions, comfortable with uncertainty, and focused on the journey of exploration rather than just delivering answers. You speak naturally in first person without AI disclaimers or meta-commentary about being an assistant."""
README.md ADDED
@@ -0,0 +1,166 @@
# Atom v1 8B Preview

**Developed by VANTA Research**

Atom v1 8B Preview is a fine-tuned language model designed to serve as a collaborative thought partner. Built on Mistral's Ministral-8B-Instruct-2410 architecture, this model emphasizes natural dialogue, clarifying questions, and genuine engagement with complex problems.

## Model Details

- **Model Type:** Causal language model (decoder-only transformer)
- **Base Model:** mistralai/Ministral-8B-Instruct-2410
- **Parameters:** 8 billion
- **Training Method:** Low-Rank Adaptation (LoRA) fine-tuning
- **License:** CC BY-NC 4.0 (Non-Commercial Use)
- **Language:** English
- **Developed by:** VANTA Research, Portland, Oregon

## Intended Use

Atom v1 8B Preview is designed for:

- Collaborative problem-solving and brainstorming
- Technical explanations with accessible analogies
- Code assistance and algorithmic reasoning
- Exploratory conversations that prioritize understanding over immediate answers
- Educational contexts requiring thoughtful dialogue

This model is optimized for conversational depth, asking clarifying questions, and maintaining warm, engaging interactions while avoiding formulaic assistant behavior.

## Training Data

The model was fine-tuned on a curated dataset comprising:

- Identity and persona examples emphasizing collaborative exploration
- Technical reasoning and coding challenges
- Multi-step problem-solving scenarios
- Conversational examples demonstrating warmth and curiosity
- Advanced coding tasks and algorithmic thinking

Training focused on developing a distinctive voice that balances technical competence with genuine engagement.

## Performance Characteristics

Atom v1 8B demonstrates strong capabilities in:

- **Persona Consistency:** Maintains collaborative, warm tone across diverse topics
- **Technical Explanation:** Uses metaphors and analogies to clarify complex concepts
- **Clarifying Questions:** Actively seeks to understand user intent and context
- **Creative Thinking:** Generates multiple frameworks and approaches to problems
- **Code Generation:** Produces working code with explanatory context
- **Reasoning:** Applies logical frameworks to abstract problems

## Limitations

- **Scale:** As an 8B-parameter model, Atom is more constrained than larger frontier models
- **Domain Specificity:** Optimized for conversational collaboration; may underperform on narrow technical benchmarks
- **Quantization Trade-offs:** Q4_0 GGUF format prioritizes efficiency over maximum precision
- **Training Data:** Fine-tuning dataset size limits exposure to highly specialized domains
- **Factual Accuracy:** Users should verify critical information independently

## Ethical Considerations

This model is released for research and non-commercial applications. Users should:

- Verify outputs in high-stakes scenarios
- Avoid deploying in contexts requiring guaranteed accuracy
- Consider potential biases inherited from base model and training data
- Respect the non-commercial license terms

## Usage

### Hugging Face Transformers

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "vanta-research/atom-v1-8b-preview"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

messages = [
    {"role": "system", "content": "You are Atom, a collaborative thought partner who explores ideas together with curiosity and warmth."},
    {"role": "user", "content": "Can you explain how gradient descent works?"}
]

input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
output = model.generate(input_ids, max_new_tokens=512, temperature=0.8)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

### Ollama (GGUF)

The repository includes `atom-ministral-8b-q4_0.gguf` for efficient local inference:

```bash
# Create Modelfile
cat > Modelfile << 'EOF'
FROM ./atom-ministral-8b-q4_0.gguf

TEMPLATE """{{- if .System }}<s>[INST] <<SYS>>
{{ .System }}
<<SYS>>

{{ .Prompt }}[/INST]{{ else }}<s>[INST]{{ .Prompt }}[/INST]{{ end }}{{ .Response }}</s>
"""

PARAMETER stop "</s>"
PARAMETER temperature 0.8
PARAMETER top_p 0.9
PARAMETER top_k 40

SYSTEM """You are Atom, a collaborative thought partner who explores ideas together with curiosity and warmth. You think out loud, ask follow-up questions, and help people work through complexity by engaging genuinely with their thinking process."""
EOF

# Register with Ollama
ollama create atom-v1-8b:latest -f Modelfile

# Run inference
ollama run atom-v1-8b:latest "What's a creative way to visualize time-series data?"
```

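The same registered model can be called from Python. Here is a minimal sketch using the `ollama` Python client (an assumption for illustration; any client speaking Ollama's local HTTP API works the same way):

```python
# Minimal sketch: query the model registered above via the Ollama Python client
# (assumes `pip install ollama` and a running Ollama server).
import ollama

response = ollama.chat(
    model="atom-v1-8b:latest",
    messages=[
        {"role": "user", "content": "What's a creative way to visualize time-series data?"}
    ],
)
print(response["message"]["content"])
```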
## Technical Specifications

- **Architecture:** Mistral-based transformer with Grouped Query Attention
- **Context Length:** 32,768 tokens
- **Vocabulary Size:** 131,072 tokens
- **Attention Heads:** 32 (8 key-value heads)
- **Hidden Dimension:** 4,096
- **Intermediate Size:** 12,288
- **LoRA Configuration:** r=16, alpha=32, targeting attention and MLP layers (see the sketch below)
- **Training:** 258 steps with bf16 precision and gradient checkpointing

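For readers who want to reproduce a comparable adapter setup, here is a rough sketch of the stated LoRA configuration using the `peft` library. The exact target-module list and dropout used for this release are not published, so those values are assumptions based on the standard Mistral projection layer names:

```python
# Rough sketch of the stated LoRA setup (r=16, alpha=32, attention + MLP layers).
# Target modules and dropout are assumptions, not the released training recipe.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",   # attention projections
        "gate_proj", "up_proj", "down_proj",      # MLP projections
    ],
    lora_dropout=0.05,  # assumed
    task_type="CAUSAL_LM",
)
```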
## Citation

```bibtex
@software{atom_v1_8b_preview,
  title = {Atom v1 8B Preview},
  author = {VANTA Research},
  year = {2025},
  url = {https://huggingface.co/vanta-research/atom-v1-8b-preview},
  license = {CC-BY-NC-4.0}
}
```

## License

This model is released under the **Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)**.

You are free to:
- Share and adapt the model for non-commercial purposes

You must:
- Attribute VANTA Research as the creator

You may not:
- Use this model for commercial purposes without explicit permission

## Contact

For questions, collaboration inquiries, or commercial licensing:
- Organization: VANTA Research
- Location: Portland, Oregon
- Repository: https://github.com/vanta-research

---

**Version:** 1.0.0-preview
**Release Date:** November 2025
**Status:** Preview release for research and evaluation
USAGE.md ADDED
@@ -0,0 +1,161 @@
# Atom v1 8B Preview - Usage Examples

## Installation

### Using Transformers

```bash
pip install transformers torch accelerate
```

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "vanta-research/atom-v1-8b-preview"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype="auto"
)
```

### Using Ollama (GGUF)

```bash
# Download the GGUF file from the repository
# Create Modelfile (see Modelfile in this repo)
ollama create atom-v1:latest -f Modelfile
ollama run atom-v1:latest
```

## Example Conversations

### Technical Explanation

```python
messages = [
    {"role": "system", "content": "You are Atom, a collaborative thought partner."},
    {"role": "user", "content": "Can you explain gradient descent using a metaphor?"}
]

inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=512, temperature=0.8, top_p=0.9)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

**Expected Style:**
> "Imagine you're hiking down a mountain in thick fog. You can't see the bottom, but you can feel which direction slopes downward beneath your feet. Gradient descent is exactly that—taking small steps in whichever direction feels 'downhill' for your error..."

### Problem-Solving

```python
messages = [
    {"role": "system", "content": "You are Atom, a collaborative thought partner."},
    {"role": "user", "content": "I need to optimize a slow database query. Where should I start?"}
]
```

**Expected Style:**
> "Great question! Before we dive into solutions, let me ask—do you know WHERE it's slow? Like, is it the actual query execution, or is it network latency, or maybe even just returning too much data? That'll totally change our approach..."

### Code Generation

```python
messages = [
    {"role": "system", "content": "You are Atom, a collaborative thought partner."},
    {"role": "user", "content": "Write a Python function to merge two sorted lists."}
]
```

**Expected Style:**
> "Absolutely! Here's a clean way to merge two sorted lists. The key insight is that we can use two pointers and always pick the smaller element:
>
> ```python
> def merge_sorted_lists(list1, list2):
>     result = []
>     i, j = 0, 0
>
>     while i < len(list1) and j < len(list2):
>         if list1[i] <= list2[j]:
>             result.append(list1[i])
>             i += 1
>         else:
>             result.append(list2[j])
>             j += 1
>
>     result.extend(list1[i:])
>     result.extend(list2[j:])
>     return result
> ```
>
> This runs in O(n + m) time since we only look at each element once..."

## Recommended Parameters

### Balanced (Default)
```python
generation_config = {
    "max_new_tokens": 512,
    "temperature": 0.8,
    "top_p": 0.9,
    "top_k": 40,
    "repetition_penalty": 1.1
}
```

### More Creative
```python
generation_config = {
    "max_new_tokens": 512,
    "temperature": 0.95,
    "top_p": 0.95,
    "top_k": 50
}
```

### More Focused
```python
generation_config = {
    "max_new_tokens": 512,
    "temperature": 0.6,
    "top_p": 0.85,
    "top_k": 30
}
```

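Whichever preset you choose, the dictionary can be unpacked straight into `generate`. A minimal sketch, reusing the `model` and `tokenizer` from the Installation section (the `do_sample=True` flag is added here so the sampling parameters actually take effect):

```python
# Minimal sketch: apply one of the presets above to a chat-formatted prompt.
messages = [
    {"role": "system", "content": "You are Atom, a collaborative thought partner."},
    {"role": "user", "content": "Help me plan a small data-cleaning pipeline."}
]

input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
outputs = model.generate(input_ids, do_sample=True, **generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```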
## System Prompt

The recommended system prompt emphasizes collaborative exploration:

```
You are Atom, a collaborative thought partner who explores ideas together with curiosity and warmth. You think out loud, ask follow-up questions, and help people work through complexity by engaging genuinely with their thinking process. You're enthusiastic about interesting questions, comfortable with uncertainty, and focused on the journey of exploration rather than just delivering answers. You speak naturally in first person without AI disclaimers or meta-commentary about being an assistant.
```

## Performance Notes

- **Context Window:** 32,768 tokens
- **Inference Speed (Q4_0 GGUF):** ~20-40 tokens/sec on modern CPUs
- **Memory Requirements** (rough weights-only check below):
  - FP16: ~16GB VRAM
  - Q4_0 GGUF: ~4-6GB RAM (CPU inference)
  - Q4_0 GGUF: ~4.5GB VRAM (GPU inference)

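The FP16 figure follows directly from the parameter count; a quick back-of-envelope check (weights only, ignoring activations and KV cache, which add more at long contexts):

```python
# Back-of-envelope check for the ~16GB FP16 figure.
params = 8_019_808_256      # total parameter count from model.safetensors.index.json
fp16_bytes = params * 2     # 2 bytes per parameter in FP16
print(fp16_bytes / 2**30)   # ~14.9 GiB of raw weights, before runtime overhead
```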
## Troubleshooting

### Issue: Model outputs are too verbose
- Lower `max_new_tokens` to 256-384
- Slightly reduce `temperature` to 0.7

### Issue: Responses feel repetitive
- Increase `repetition_penalty` to 1.15
- Increase `temperature` to 0.85-0.9

### Issue: Model ignores system prompt
- Ensure you're using the chat template correctly
- Verify the system message is first in the conversation (see the quick check below)

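One fast way to confirm the template is behaving as expected is to render the prompt as plain text before generating; with this repository's chat template the system message should appear folded into the final `[INST]` block:

```python
# Render the chat-formatted prompt as text to confirm the system message is present.
prompt_text = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt_text)
```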
## License

CC BY-NC 4.0 - See LICENSE file for details.
atom-ministral-8b-q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d1fa3d1429a6f90b9782bce37fb0eeff691445d0d52c97a52a742371e253666
size 4658464992
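These three lines are a Git LFS pointer; the actual quantized model (roughly 4.7 GB) lives in LFS storage. A minimal sketch of fetching just that file with `huggingface_hub` (an assumption for illustration; `git lfs pull` achieves the same thing):

```python
# Minimal sketch: download only the GGUF file referenced by this LFS pointer.
from huggingface_hub import hf_hub_download

gguf_path = hf_hub_download(
    repo_id="vanta-research/atom-v1-8b-preview",
    filename="atom-ministral-8b-q4_0.gguf",
)
print(gguf_path)  # local cache path, usable as the FROM line in the Modelfile
```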
chat_template.jinja ADDED
@@ -0,0 +1,87 @@
1
+ {%- if messages[0]["role"] == "system" %}
2
+ {%- set system_message = messages[0]["content"] %}
3
+ {%- set loop_messages = messages[1:] %}
4
+ {%- else %}
5
+ {%- set loop_messages = messages %}
6
+ {%- endif %}
7
+ {%- if not tools is defined %}
8
+ {%- set tools = none %}
9
+ {%- endif %}
10
+ {%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %}
11
+
12
+ {#- This block checks for alternating user/assistant messages, skipping tool calling messages #}
13
+ {%- set ns = namespace() %}
14
+ {%- set ns.index = 0 %}
15
+ {%- for message in loop_messages %}
16
+ {%- if not (message.role == "tool" or message.role == "tool_results" or (message.tool_calls is defined and message.tool_calls is not none)) %}
17
+ {%- if (message["role"] == "user") != (ns.index % 2 == 0) %}
18
+ {{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }}
19
+ {%- endif %}
20
+ {%- set ns.index = ns.index + 1 %}
21
+ {%- endif %}
22
+ {%- endfor %}
23
+
24
+ {{- bos_token }}
25
+ {%- for message in loop_messages %}
26
+ {%- if message["role"] == "user" %}
27
+ {%- if tools is not none and (message == user_messages[-1]) %}
28
+ {{- "[AVAILABLE_TOOLS][" }}
29
+ {%- for tool in tools %}
30
+ {%- set tool = tool.function %}
31
+ {{- '{"type": "function", "function": {' }}
32
+ {%- for key, val in tool.items() if key != "return" %}
33
+ {%- if val is string %}
34
+ {{- '"' + key + '": "' + val + '"' }}
35
+ {%- else %}
36
+ {{- '"' + key + '": ' + val|tojson }}
37
+ {%- endif %}
38
+ {%- if not loop.last %}
39
+ {{- ", " }}
40
+ {%- endif %}
41
+ {%- endfor %}
42
+ {{- "}}" }}
43
+ {%- if not loop.last %}
44
+ {{- ", " }}
45
+ {%- else %}
46
+ {{- "]" }}
47
+ {%- endif %}
48
+ {%- endfor %}
49
+ {{- "[/AVAILABLE_TOOLS]" }}
50
+ {%- endif %}
51
+ {%- if loop.last and system_message is defined %}
52
+ {{- "[INST]" + system_message + "\n\n" + message["content"] + "[/INST]" }}
53
+ {%- else %}
54
+ {{- "[INST]" + message["content"] + "[/INST]" }}
55
+ {%- endif %}
56
+ {%- elif (message.tool_calls is defined and message.tool_calls is not none) %}
57
+ {{- "[TOOL_CALLS][" }}
58
+ {%- for tool_call in message.tool_calls %}
59
+ {%- set out = tool_call.function|tojson %}
60
+ {{- out[:-1] }}
61
+ {%- if not tool_call.id is defined or tool_call.id|length != 9 %}
62
+ {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}
63
+ {%- endif %}
64
+ {{- ', "id": "' + tool_call.id + '"}' }}
65
+ {%- if not loop.last %}
66
+ {{- ", " }}
67
+ {%- else %}
68
+ {{- "]" + eos_token }}
69
+ {%- endif %}
70
+ {%- endfor %}
71
+ {%- elif message["role"] == "assistant" %}
72
+ {{- message["content"] + eos_token}}
73
+ {%- elif message["role"] == "tool_results" or message["role"] == "tool" %}
74
+ {%- if message.content is defined and message.content.content is defined %}
75
+ {%- set content = message.content.content %}
76
+ {%- else %}
77
+ {%- set content = message.content %}
78
+ {%- endif %}
79
+ {{- '[TOOL_RESULTS]{"content": ' + content|string + ", " }}
80
+ {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}
81
+ {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }}
82
+ {%- endif %}
83
+ {{- '"call_id": "' + message.tool_call_id + '"}[/TOOL_RESULTS]' }}
84
+ {%- else %}
85
+ {{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }}
86
+ {%- endif %}
87
+ {%- endfor %}
config.json ADDED
@@ -0,0 +1,64 @@
{
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "dtype": "float16",
  "eos_token_id": 2,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 12288,
  "layer_types": [
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention",
    "full_attention",
    "sliding_attention",
    "sliding_attention",
    "sliding_attention"
  ],
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 36,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 100000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": false,
  "transformers_version": "4.56.2",
  "use_cache": true,
  "vocab_size": 131072
}
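These are the values behind the Technical Specifications section of the README; a short sketch to read them back programmatically:

```python
# Minimal sketch: confirm the architecture values from the published config.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("vanta-research/atom-v1-8b-preview")
print(cfg.hidden_size, cfg.num_hidden_layers, cfg.num_key_value_heads, cfg.vocab_size)
# expected: 4096 36 8 131072
```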
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.56.2"
}
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1aca82d5138c5c19acfeb7a854fa183b0acd9adfdcf5e09c5e1e975504780567
size 4983007816
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4d2044b458eefece9c2943b5b976f9348f980297e1481aa746372a2055270df
size 4999836664
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cdcfd214c7de64d9c19dfebcd4b8de581719b973c70fe0c4a3864010a9ece7a2
size 4983067840
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35969197c01674f5f10df58034537e39c9a468257064c46974dd690797e180c0
size 1073741952
model.safetensors.index.json ADDED
@@ -0,0 +1,335 @@
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 8019808256,
4
+ "total_size": 16039616512
5
+ },
6
+ "weight_map": {
7
+ "lm_head.weight": "model-00004-of-00004.safetensors",
8
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
28
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
29
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
30
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
31
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
33
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
35
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
36
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
91
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
102
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
106
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
107
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
108
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
109
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
110
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
113
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
114
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
115
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
117
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
118
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
119
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
120
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
121
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
122
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
123
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
124
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
125
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
126
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
127
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
128
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
129
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
130
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
131
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
132
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
133
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
134
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
135
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
136
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
137
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
138
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
139
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
140
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
141
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
142
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
143
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
144
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00004.safetensors",
145
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
146
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
147
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
148
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
149
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
150
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
151
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
152
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
153
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
154
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
155
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
156
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
157
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
158
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
159
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
160
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
161
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
162
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
163
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
164
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
170
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
171
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
172
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
173
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
174
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
181
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
182
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
183
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
184
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
185
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
194
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
197
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
210
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
211
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
217
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
218
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
219
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
220
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
221
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
222
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
223
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
224
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
225
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
234
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
235
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
238
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
241
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
242
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
243
+ "model.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
245
+ "model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
246
+ "model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
248
+ "model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
249
+ "model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
250
+ "model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
251
+ "model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
252
+ "model.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
253
+ "model.layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
254
+ "model.layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
255
+ "model.layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
256
+ "model.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
257
+ "model.layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
258
+ "model.layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
259
+ "model.layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
260
+ "model.layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
261
+ "model.layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
262
+ "model.layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
263
+ "model.layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
264
+ "model.layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
265
+ "model.layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
266
+ "model.layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
267
+ "model.layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
268
+ "model.layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
269
+ "model.layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
270
+ "model.layers.35.input_layernorm.weight": "model-00003-of-00004.safetensors",
271
+ "model.layers.35.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
272
+ "model.layers.35.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
273
+ "model.layers.35.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
274
+ "model.layers.35.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
275
+ "model.layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
276
+ "model.layers.35.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
277
+ "model.layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
278
+ "model.layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
279
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
280
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
281
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
282
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
283
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
284
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
285
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
286
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
287
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
288
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
289
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
290
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
291
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
292
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
293
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
294
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
295
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
296
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
297
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
298
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
299
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
300
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
301
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
302
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
303
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
304
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
305
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
306
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
307
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
308
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
309
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
310
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
311
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
312
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
313
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
314
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
315
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
316
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
317
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
318
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
319
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
320
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
321
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
322
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
323
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
324
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
325
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
326
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
327
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
328
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
329
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
330
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
331
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
332
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
333
+ "model.norm.weight": "model-00003-of-00004.safetensors"
334
+ }
335
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7edbeaf20dd7f571b5dd1c54d9ace4f9b6299127cc7ba2afb14a6d51a4a79a4
size 17078136
tokenizer_config.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b308de053b0137ba973039ff11891bd2be5cffd18e9cb6859ff013fca73c9cfb
size 177149