diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..52373fe24473b1aa44333d318f578ae6bf04b49b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..dc0084b9bb2ec0edb5ee1aa13870f566a759112a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,29 @@
+---
+pipeline_tag: text-generation
+license: other
+license_name: modified-mit
+license_link: https://github.com/MiniMax-AI/MiniMax-M2.5/blob/main/LICENSE
+library_name: transformers
+tags:
+- mlx
+base_model: MiniMaxAI/MiniMax-M2.5
+---
+## 💫 Community Model> MiniMax-M2.5 by MiniMaxAI
+
+_👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)_.
+
+**Model creator**: [MiniMaxAI](https://huggingface.co/MiniMaxAI)
+**Original model**: [MiniMax-M2.5](https://huggingface.co/MiniMaxAI/MiniMax-M2.5)
+**MLX quantization**: provided by [LM Studio team](https://x.com/lmstudio) using [mlx_lm](https://github.com/ml-explore/mlx-lm)
+
+## Technical Details
+
+8-bit quantized version of MiniMax-M2.5 using MLX, optimized for Apple Silicon.
+
+## Special thanks
+
+🙏 Special thanks to the [Apple Machine Learning Research](https://github.com/ml-explore) team for creating [MLX](https://github.com/ml-explore/mlx).
+
+## Disclaimers
+
+LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
diff --git a/chat_template.jinja b/chat_template.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..4623080ad8c269fc12408c9586462eeddd3f7dd3
--- /dev/null
+++ b/chat_template.jinja
@@ -0,0 +1,159 @@
+{# ---------- special token variables ---------- #}
+{%- set toolcall_begin_token = '' -%}
+{%- set toolcall_end_token = '' -%}
+{#- Tool Rendering Functions ============================================== -#}
+{%- macro render_tool_namespace(namespace_name, tool_list) -%}
+{%- for tool in tool_list -%}
+{{ tool.function | tojson(ensure_ascii=False) }}
+{% endfor -%}
+{%- endmacro -%}
+{%- macro visible_text(content) -%}
+ {%- if content is string -%}
+ {{ content }}
+ {%- elif content is iterable and content is not mapping -%}
+ {%- for item in content -%}
+ {%- if item is mapping and item.type == 'text' -%}
+ {{- item.text }}
+ {%- elif item is string -%}
+ {{- item }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{- content }}
+ {%- endif -%}
+{%- endmacro -%}
+{#- System Message Construction ============================================ -#}
+{%- macro build_system_message(system_message) -%}
+ {%- if system_message and system_message.content -%}
+ {{- visible_text(system_message.content) }}
+ {%- else -%}
+ {%- if model_identity is not defined -%}
+ {%- set model_identity = "You are a helpful assistant. Your name is MiniMax-M2.5 and is built by MiniMax." -%}
+ {%- endif -%}
+ {{- model_identity }}
+ {%- endif -%}
+
+ {#- Handle current_date -#}
+ {%- if system_message and system_message.current_date -%}
+ {{- '\n' ~ 'Current date: ' + system_message.current_date }}
+ {%- endif -%}
+ {#- Handle current_location -#}
+ {%- if system_message and system_message.current_location -%}
+ {{- '\n' ~ 'Current location: ' + system_message.current_location }}
+ {%- endif -%}
+{%- endmacro -%}
+{#- Main Template Logic ================================================= -#}
+{#- Extract system message (only first message if it's system) -#}
+{%- set system_message = none -%}
+{%- set conversation_messages = messages -%}
+{%- if messages and messages[0].role == "system" -%}
+ {%- set system_message = messages[0] -%}
+ {%- set conversation_messages = messages[1:] -%}
+{%- endif -%}
+{#- Get the last user message turn, for interleaved thinking -#}
+{%- set ns = namespace(last_user_index=-1) %}
+{% for m in conversation_messages %}
+ {%- if m.role == 'user' %}
+ {% set ns.last_user_index = loop.index0 -%}
+ {%- endif %}
+{%- endfor %}
+{#- Render system message -#}
+{{- ']~!b[' ~ ']~b]system' ~ '\n' }}
+{{- build_system_message(system_message) }}
+{#- Render tools if available -#}
+{%- if tools -%}
+ {{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
+ {{- '\n' ~ '' ~ '\n' }}
+ {{- render_tool_namespace("functions", tools) }}
+ {{- '' ~ '\n\n' }}
+{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
+{{- '\n' ~ toolcall_begin_token }}
+
+param-value-1
+param-value-2
+...
+
+{{- '\n' ~ toolcall_end_token }}
+{%- endif -%}
+{{- '[e~[\n' }}
+
+{#- Render messages -#}
+{%- set last_tool_call = namespace(name=none) -%}
+{%- for message in conversation_messages -%}
+ {%- if message.role == 'assistant' -%}
+ {#- Only render reasoning_content if no user message follows -#}
+ {{- ']~b]ai' ~ '\n' }}
+
+ {%- set reasoning_content = '' %}
+ {%- set content = visible_text(message.content) %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '' in content %}
+ {%- set reasoning_content = content.split('')[0].strip('\n').split('')[-1].strip('\n') %}
+ {%- set content = content.split('')[-1].strip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if reasoning_content and loop.index0 > ns.last_user_index -%}
+ {{- '' ~ '\n' ~ reasoning_content ~ '\n' ~ '' ~ '\n\n' }}
+ {%- endif -%}
+ {%- if content -%}
+ {{- content }}
+ {%- endif -%}
+ {%- if message.tool_calls -%}
+ {{- '\n' ~ toolcall_begin_token ~ '\n' }}
+
+ {%- for tool_call in message.tool_calls -%}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '' }}
+ {% set _args = tool_call.arguments %}
+ {%- for k, v in _args.items() %}
+ {{- '' }}
+ {{- v | tojson(ensure_ascii=False) if v is not string else v }}
+ {{- '' }}
+ {% endfor %}
+ {{- '' ~ '\n' }}
+ {%- endfor -%}
+
+ {{- toolcall_end_token}}
+ {%- set last_tool_call.name = message.tool_calls[-1].name -%}
+ {%- else -%}
+ {%- set last_tool_call.name = none -%}
+ {%- endif -%}
+ {{- '[e~[' ~ '\n' }}
+
+ {%- elif message.role == 'tool' -%}
+ {%- if last_tool_call.name is none -%}
+ {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
+ {%- endif -%}
+ {%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
+ {{- ']~b]tool' }}
+ {%- endif -%}
+ {%- if message.content is string -%}
+ {{- '\n' }}
+ {{- message.content }}
+ {{- '' }}
+ {%- else -%}
+ {%- for tr in message.content -%}
+ {{- '\n' }}
+ {{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
+ {{- '\n' }}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
+ {{- '[e~[\n' -}}
+ {%- endif -%}
+
+ {%- elif message.role == 'user' -%}
+ {{- ']~b]user' ~ '\n' }}
+ {{- visible_text(message.content) }}
+ {{- '[e~[' ~ '\n' }}
+ {%- endif -%}
+{%- endfor -%}
+
+{#- Generation prompt -#}
+{%- if add_generation_prompt -%}
+{{- ']~b]ai' ~ '\n' ~ '' ~ '\n' }}
+{%- endif -%}
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c1ead22e7efaefedc667bafe6a6dedf628fea4f5
--- /dev/null
+++ b/config.json
@@ -0,0 +1,606 @@
+{
+ "architectures": [
+ "MiniMaxM2ForCausalLM"
+ ],
+ "attn_type_list": [
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+ "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+ },
+ "eos_token_id": 200020,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 3072,
+ "intermediate_size": 1536,
+ "max_position_embeddings": 196608,
+ "model_type": "minimax_m2",
+ "mtp_transformer_layers": 1,
+ "num_attention_heads": 48,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 62,
+ "num_key_value_heads": 8,
+ "num_local_experts": 256,
+ "num_mtp_modules": 3,
+ "qk_norm_type": "per_layer",
+ "quantization": {
+ "group_size": 64,
+ "bits": 8,
+ "mode": "affine",
+ "model.layers.0.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.1.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.2.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.3.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.4.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.5.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.6.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.7.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.8.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.9.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.10.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.11.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.12.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.13.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.14.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.15.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.16.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.17.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.18.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.19.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.20.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.21.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.22.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.23.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.24.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.25.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.26.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.27.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.28.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.29.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.30.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.31.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.32.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.33.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.34.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.35.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.36.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.37.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.38.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.39.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.40.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.41.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.42.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.43.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.44.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.45.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.46.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.47.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.48.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.49.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.50.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.51.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.52.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.53.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.54.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.55.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.56.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.57.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.58.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.59.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.60.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.61.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ }
+ },
+ "quantization_config": {
+ "group_size": 64,
+ "bits": 8,
+ "mode": "affine",
+ "model.layers.0.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.1.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.2.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.3.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.4.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.5.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.6.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.7.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.8.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.9.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.10.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.11.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.12.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.13.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.14.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.15.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.16.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.17.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.18.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.19.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.20.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.21.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.22.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.23.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.24.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.25.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.26.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.27.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.28.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.29.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.30.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.31.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.32.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.33.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.34.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.35.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.36.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.37.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.38.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.39.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.40.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.41.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.42.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.43.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.44.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.45.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.46.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.47.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.48.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.49.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.50.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.51.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.52.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.53.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.54.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.55.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.56.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.57.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.58.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.59.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.60.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ },
+ "model.layers.61.block_sparse_moe.gate": {
+ "group_size": 64,
+ "bits": 8
+ }
+ },
+ "rms_norm_eps": 1e-06,
+ "rope_theta": 5000000,
+ "rotary_dim": 64,
+ "scoring_func": "sigmoid",
+ "shared_intermediate_size": 0,
+ "tie_word_embeddings": false,
+ "transformers_version": "4.46.1",
+ "use_cache": true,
+ "use_mtp": true,
+ "use_qk_norm": true,
+ "use_routing_bias": true,
+ "vocab_size": 200064
+}
\ No newline at end of file
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..30b418a48e04bf5e6d584093aa23393614678619
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "bos_token_id": 200019,
+ "do_sample": true,
+ "eos_token_id": 200020,
+ "temperature": 1.0,
+ "top_p": 0.95,
+ "top_k": 40,
+ "transformers_version": "4.46.1"
+}
diff --git a/model-00001-of-00047.safetensors b/model-00001-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4b6ef9edd8a4ead30de0fa85a345920c3020c720
--- /dev/null
+++ b/model-00001-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d01399535f4c7f06e21e7bdde51c90f833bb3a5e6d44a5db09668c04017e8d3e
+size 4598782614
diff --git a/model-00002-of-00047.safetensors b/model-00002-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b3713d61764317ad4fc9090bbc204be3894cdb1d
--- /dev/null
+++ b/model-00002-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:409a2518796c87f35b6c45dd4bca122110d25411bea98ad9ba1ee0c479f1b60a
+size 5181537162
diff --git a/model-00003-of-00047.safetensors b/model-00003-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3d6a2b6e4b5a383f6d60ebe26bd8bb34d741ce77
--- /dev/null
+++ b/model-00003-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9b77317cad72228cab0e9f563f649783b7fc1de3b50522b5d29a494a161ad1d
+size 5181537168
diff --git a/model-00004-of-00047.safetensors b/model-00004-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d9c204d557b585bfc26c7ad03b5e909340c6f1db
--- /dev/null
+++ b/model-00004-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa4ac52f6a02069dea7122d33ca58a2c0ae4d0029af8218a9f3083a0bd48ec39
+size 5229244520
diff --git a/model-00006-of-00047.safetensors b/model-00006-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e2b2add3e13fe90870fa52a3c548062f421f4d7a
--- /dev/null
+++ b/model-00006-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:330651fec38932a91ff09930cb670914c43e07545af68ae7eefa30a8e78ede8a
+size 5181537174
diff --git a/model-00007-of-00047.safetensors b/model-00007-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f67946021f44ae5188e776fbf612af7d1c514263
--- /dev/null
+++ b/model-00007-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06cb5249c7430d39478ed2a58c5b87bddfceda261d05aeca0563560ec6e68b4b
+size 5229244550
diff --git a/model-00008-of-00047.safetensors b/model-00008-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..82b81acd88b69aa5f5d322422e5701da6ebabb4e
--- /dev/null
+++ b/model-00008-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a404d54845aeb6136f7269d2035c1b084b70f898fc5a2f801f9174849c5c45a
+size 5181537206
diff --git a/model-00010-of-00047.safetensors b/model-00010-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4c1b1b593d70530cbfef4db4dc017cef29373749
--- /dev/null
+++ b/model-00010-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9b2a640eebff70e8cf6b1b34ac78b97d21009edb071dd43ee717bc9ab995d26
+size 5229244604
diff --git a/model-00012-of-00047.safetensors b/model-00012-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..41674d0503163a0b64b1e997d43a31aea94b259d
--- /dev/null
+++ b/model-00012-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f62936689bb9270dd35df0ea2538bf8636303de70a8b84a574caefa2be35ff56
+size 5181537234
diff --git a/model-00013-of-00047.safetensors b/model-00013-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d76fb2f985a6ee1d08b3c62d7d9ae6f8dc8c5297
--- /dev/null
+++ b/model-00013-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9243cf1e64de18cd479b26005a8309e44e6f72fbd3ee14ee299742d845dc8de6
+size 5229244594
diff --git a/model-00014-of-00047.safetensors b/model-00014-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0725c3098f757a001d423d24c95fe62fedf49ab8
--- /dev/null
+++ b/model-00014-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:485a9c7906795a60b164f36c7a2f4dbdf14d6a0555f6395a92484f95d998c93e
+size 5181537232
diff --git a/model-00016-of-00047.safetensors b/model-00016-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9d65ef60abeaa8dc89da550d1d76febc17b439bd
--- /dev/null
+++ b/model-00016-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:800603c19d09a130182721089f03992796a5aacd4051ef27f7fb5c912be2f10a
+size 5229244568
diff --git a/model-00017-of-00047.safetensors b/model-00017-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cd408219a395bce52d50efb97333340dd39c4c0c
--- /dev/null
+++ b/model-00017-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6bd12984c00aa6d9c3b41d60a2e79e0305bcd848886485eb0981091bcf52821
+size 5181537236
diff --git a/model-00018-of-00047.safetensors b/model-00018-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fdba345c8debbb3dedaaa7c45e67b465ddcdfd13
--- /dev/null
+++ b/model-00018-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5fcd0f60d44facb2b006015d9ca1307cb5513533512fce3724ea105bf6498ee
+size 5181537226
diff --git a/model-00019-of-00047.safetensors b/model-00019-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2126033257a4b2ef7994f68595ae9b126b97df33
--- /dev/null
+++ b/model-00019-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6091f175101410776e0ee6f10ac13f581204fc16b0fe5de3e4aa5ca1041108d5
+size 5229244580
diff --git a/model-00020-of-00047.safetensors b/model-00020-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fb457094f55131e47fca2962a80e99029f9b27d0
--- /dev/null
+++ b/model-00020-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbfa2c0689b90d6f923be3f2c159a0648400b123e70d6899c5c02362c21235dc
+size 5181537232
diff --git a/model-00021-of-00047.safetensors b/model-00021-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..24032eda7f7c1cee5f7812275e151efc587f19f8
--- /dev/null
+++ b/model-00021-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15538a8f83ead1fc1935a13b06da57759dd6e5dd3458d566f96ae55a27fbb4ce
+size 5181537234
diff --git a/model-00022-of-00047.safetensors b/model-00022-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..49e540120ced18bc6e94115f7553631d8ca65b39
--- /dev/null
+++ b/model-00022-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eed8de603db8d2fbf49c91376ca9154feb9c1e78c0c1f763a69bd84f096ba567
+size 5229244586
diff --git a/model-00023-of-00047.safetensors b/model-00023-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b79ffa5d1291d0d3aa9f28feb2102eb001879eec
--- /dev/null
+++ b/model-00023-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:072c46d81de6b77a7f17f8cd2e74bfce1bd449491bec6c1a7667c6f4a9de9105
+size 5181537236
diff --git a/model-00024-of-00047.safetensors b/model-00024-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bfea7def0a11108997319f998764d60ec002e119
--- /dev/null
+++ b/model-00024-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8332bad73fab9c482454b87a461ea2fad2e03e7ff818ce2ae0a6667a0a3e0a7e
+size 5181537206
diff --git a/model-00025-of-00047.safetensors b/model-00025-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8df0c8daf16f37aa6ae47a244a81c7151e533023
--- /dev/null
+++ b/model-00025-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42668a2d4a549d32c85c8b43ed9f13454fc30b5fe37ffc7ca7a9672d758e2446
+size 5229244610
diff --git a/model-00026-of-00047.safetensors b/model-00026-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..276038cb5cccf4e81e91b11104e7669752e71ae8
--- /dev/null
+++ b/model-00026-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f620767204096a6284077a074849f6cb70f7c2e0a48999864c126c4a38f6b3a8
+size 5181537222
diff --git a/model-00027-of-00047.safetensors b/model-00027-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..77a78d3a6b30095fd1663018a0e094a65d6f1d27
--- /dev/null
+++ b/model-00027-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f829512c3cfe5b3a103cec7a2446edac596577297b59fa8543904b3f61e33a8d
+size 5181537198
diff --git a/model-00028-of-00047.safetensors b/model-00028-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a88c69b5ba1f0b3bdb978baa3abcfc1872fb282e
--- /dev/null
+++ b/model-00028-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aa8d712ffa9cb3346db2e4281fd052727efbb2fb3a767c28cc0660904878f91
+size 5229244570
diff --git a/model-00029-of-00047.safetensors b/model-00029-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0af219f1dcc6e73a74b1d8c29de43f55803167c9
--- /dev/null
+++ b/model-00029-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d05266ca7d7f9046b4fa0f831dfb678a77eab7b529e0882a12af05f89dab8de0
+size 5181537216
diff --git a/model-00030-of-00047.safetensors b/model-00030-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..585ac684c3c3672581f61adfdc870eb75e3cde75
--- /dev/null
+++ b/model-00030-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b54d99239008658c0e4f5811b07521a10217cedf40239b0045aac7983913211c
+size 5181537202
diff --git a/model-00031-of-00047.safetensors b/model-00031-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..aedb0508f07b0462c90fd379ba8d82c9e8188bc5
--- /dev/null
+++ b/model-00031-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44f3401533b3630a9f36c334e6f56f5bf3885a0a2194685d65964fb297401573
+size 5229244614
diff --git a/model-00032-of-00047.safetensors b/model-00032-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0011c599badf186b2acaeed15bc318dff54ba6d5
--- /dev/null
+++ b/model-00032-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e5b7ed6d032a805a9a4ec0c8fb51d9dde3d9a6afdcefd0972ed9d1ace9c6a99
+size 5181537236
diff --git a/model-00033-of-00047.safetensors b/model-00033-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c7605eb5708f290a60525c3efd15980a5204f768
--- /dev/null
+++ b/model-00033-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecd41d7f326ed0f656b893469c0ed0336a2528f72ad5d5af20fdc383d1edb5d5
+size 5181537230
diff --git a/model-00034-of-00047.safetensors b/model-00034-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c67841412a6c016cdd1e942f7fb89070b787edb4
--- /dev/null
+++ b/model-00034-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f731262f01dea5888542fdf34f5ea940053f0e0c013e923309935fff6f533d8e
+size 5229244602
diff --git a/model-00035-of-00047.safetensors b/model-00035-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..de52e63a05b28f4f7ce1229d53a84fcbe1690b51
--- /dev/null
+++ b/model-00035-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e0c0ed047f6676d9e0121a3e62c2a1f6cce1d189336d620de0ff7849cf82bf2
+size 5181537236
diff --git a/model-00036-of-00047.safetensors b/model-00036-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a6e39b7303eb634dec73cf768175e3968fe242ec
--- /dev/null
+++ b/model-00036-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:945dac2db11d8ed2535b6286b2943c555f5bc1d39398b167ae856bfa07f0e669
+size 5181537230
diff --git a/model-00037-of-00047.safetensors b/model-00037-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..580fc4f88bfc0f1cb7bd8ed22896d1e4dfd0ab62
--- /dev/null
+++ b/model-00037-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd318601732e8853cd6110022b3a17772e10087bcc554d057d54680e2004234f
+size 5229244584
diff --git a/model-00038-of-00047.safetensors b/model-00038-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4e6dd60062ac3d296bd8ac61d50c706e6fa3a1e4
--- /dev/null
+++ b/model-00038-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f335fb2e9f0706acf2cd90bac4c2a20c77de98542acf4e5b87995511e99e4e8f
+size 5181537202
diff --git a/model-00039-of-00047.safetensors b/model-00039-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..94f64c5d781cb8465bb7bd9cfbaf299711eb711a
--- /dev/null
+++ b/model-00039-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f70801100e3db0db81ec6f05a5a5233e5d4d442cbf2a23ff90c4baaaa408c227
+size 5181537230
diff --git a/model-00040-of-00047.safetensors b/model-00040-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ca57ee6e02f8dd4df2d30a76d416c19e1ad824ad
--- /dev/null
+++ b/model-00040-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:871888e413c1dce6cfc5f63c399ad923bb7b173dea682bb1eab66e20abe646e7
+size 5229244560
diff --git a/model-00041-of-00047.safetensors b/model-00041-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..685555fa0bb803c0b5c5dacbb3c39101db665c98
--- /dev/null
+++ b/model-00041-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e3192622c4ace0b8cbde91927f81dcb21d65557f533d28939c74436a9cae124
+size 5181537240
diff --git a/model-00042-of-00047.safetensors b/model-00042-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cf8d9876c81afa954c8167997c5ad048fa9af3ed
--- /dev/null
+++ b/model-00042-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37b0bae6729062c9879744a81037127ef901c49afa9fe61b4c78a8b648a9fdee
+size 5181537218
diff --git a/model-00043-of-00047.safetensors b/model-00043-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..87aef5ab1c46640a0b11617d187ab982ad67c327
--- /dev/null
+++ b/model-00043-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bd8fe0ff57a14186164320a1573492deee5a8db4f1ce901c486ed884d9f63c0
+size 5229244560
diff --git a/model-00044-of-00047.safetensors b/model-00044-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6ed61c2358939c6a29e952a14d0a2ba63d3317c0
--- /dev/null
+++ b/model-00044-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cc40761ce6361189bd0af67a5e3cde814f075e84649e59932cdb04c75cbbf73
+size 5181537240
diff --git a/model-00045-of-00047.safetensors b/model-00045-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9f5a7c7a8e0179f261112f1b17a1a3c7e0540cf0
--- /dev/null
+++ b/model-00045-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f379d8862c5da75c3a41245aef229a99df67d667e6aff79507932bc51c79127c
+size 5181537230
diff --git a/model-00046-of-00047.safetensors b/model-00046-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..db2546b58643125924fa1a2196bfdbcda3b95291
--- /dev/null
+++ b/model-00046-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03e26c66d85f9e69b4d6160c4e796f89fb5eb8ed03d515d397ae41ff70715f8a
+size 5229244594
diff --git a/model-00047-of-00047.safetensors b/model-00047-of-00047.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..de14e31b2efd98f30727873714ec0dc87b0f1a9d
--- /dev/null
+++ b/model-00047-of-00047.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a1a5ea26a0112eb73564c5bc8b09248dfa1893afb63f32d27147008e3510344
+size 4503401440
diff --git a/model.safetensors.index.json b/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..4480843a0f665583e0b87afe098d32266e3a4345
--- /dev/null
+++ b/model.safetensors.index.json
@@ -0,0 +1,1813 @@
+{
+ "metadata": {
+ "total_size": 242986745856,
+ "total_parameters": 228689748992
+ },
+ "weight_map": {
+ "lm_head.biases": "model-00047-of-00047.safetensors",
+ "lm_head.scales": "model-00047-of-00047.safetensors",
+ "lm_head.weight": "model-00047-of-00047.safetensors",
+ "model.embed_tokens.biases": "model-00001-of-00047.safetensors",
+ "model.embed_tokens.scales": "model-00001-of-00047.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.e_score_correction_bias": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.gate.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.gate.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.gate.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.down_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.down_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.down_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.up_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.up_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.block_sparse_moe.switch_mlp.up_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.k_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.q_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.v_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.e_score_correction_bias": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.gate.biases": "model-00001-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.gate.scales": "model-00001-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.gate.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.down_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.down_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.down_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.up_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.up_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.1.block_sparse_moe.switch_mlp.up_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00002-of-00047.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00002-of-00047.safetensors",
+ "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.k_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.k_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.q_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.q_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.v_proj.biases": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.v_proj.scales": "model-00001-of-00047.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.e_score_correction_bias": "model-00009-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.gate.biases": "model-00008-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.gate.scales": "model-00008-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.gate.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.down_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.down_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.down_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.up_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.up_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.10.block_sparse_moe.switch_mlp.up_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00009-of-00047.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00009-of-00047.safetensors",
+ "model.layers.10.self_attn.k_norm.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.k_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.k_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.o_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.o_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.q_norm.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.q_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.q_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.v_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.v_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.e_score_correction_bias": "model-00010-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.gate.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.gate.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.gate.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.down_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.down_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.down_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.up_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.up_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.block_sparse_moe.switch_mlp.up_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.11.self_attn.k_norm.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.k_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.k_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.o_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.o_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.q_norm.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.q_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.q_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.v_proj.biases": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.v_proj.scales": "model-00009-of-00047.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00009-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.e_score_correction_bias": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.gate.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.gate.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.gate.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.down_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.down_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.down_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.up_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.up_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.block_sparse_moe.switch_mlp.up_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.k_norm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.k_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.k_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.o_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.o_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.q_norm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.q_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.q_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.v_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.v_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.e_score_correction_bias": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.gate.biases": "model-00010-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.gate.scales": "model-00010-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.gate.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.down_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.down_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.down_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.up_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.up_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.13.block_sparse_moe.switch_mlp.up_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00011-of-00047.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00011-of-00047.safetensors",
+ "model.layers.13.self_attn.k_norm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.k_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.k_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.o_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.o_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.q_norm.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.q_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.q_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.v_proj.biases": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.v_proj.scales": "model-00010-of-00047.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00010-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.e_score_correction_bias": "model-00012-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.gate.biases": "model-00011-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.gate.scales": "model-00011-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.gate.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.down_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.down_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.down_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.up_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.up_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.14.block_sparse_moe.switch_mlp.up_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00012-of-00047.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00012-of-00047.safetensors",
+ "model.layers.14.self_attn.k_norm.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.k_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.k_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.o_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.o_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.q_norm.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.q_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.q_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.v_proj.biases": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.v_proj.scales": "model-00011-of-00047.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00011-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.e_score_correction_bias": "model-00013-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.gate.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.gate.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.gate.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.down_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.down_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.down_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.up_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.up_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.block_sparse_moe.switch_mlp.up_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.15.self_attn.k_norm.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.k_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.k_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.o_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.o_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.q_norm.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.q_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.q_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.v_proj.biases": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.v_proj.scales": "model-00012-of-00047.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00012-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.e_score_correction_bias": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.gate.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.gate.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.gate.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.down_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.down_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.down_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.up_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.up_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.block_sparse_moe.switch_mlp.up_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.k_norm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.k_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.k_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.o_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.o_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.q_norm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.q_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.q_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.v_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.v_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.e_score_correction_bias": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.gate.biases": "model-00013-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.gate.scales": "model-00013-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.gate.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.down_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.down_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.down_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.up_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.up_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.17.block_sparse_moe.switch_mlp.up_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00014-of-00047.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00014-of-00047.safetensors",
+ "model.layers.17.self_attn.k_norm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.k_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.k_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.o_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.o_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.q_norm.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.q_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.q_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.v_proj.biases": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.v_proj.scales": "model-00013-of-00047.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00013-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.e_score_correction_bias": "model-00015-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.gate.biases": "model-00014-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.gate.scales": "model-00014-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.gate.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.down_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.down_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.down_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.up_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.up_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.18.block_sparse_moe.switch_mlp.up_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00015-of-00047.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00015-of-00047.safetensors",
+ "model.layers.18.self_attn.k_norm.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.k_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.k_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.o_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.o_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.q_norm.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.q_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.q_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.v_proj.biases": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.v_proj.scales": "model-00014-of-00047.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00014-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.e_score_correction_bias": "model-00016-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.gate.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.gate.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.gate.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.down_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.down_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.down_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.up_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.up_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.block_sparse_moe.switch_mlp.up_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.19.self_attn.k_norm.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.k_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.k_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.o_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.o_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.q_norm.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.q_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.q_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.v_proj.biases": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.v_proj.scales": "model-00015-of-00047.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00015-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.e_score_correction_bias": "model-00003-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.gate.biases": "model-00002-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.gate.scales": "model-00002-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.gate.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.down_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.down_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.down_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.up_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.up_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.2.block_sparse_moe.switch_mlp.up_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00003-of-00047.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00003-of-00047.safetensors",
+ "model.layers.2.self_attn.k_norm.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.k_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.k_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.o_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.o_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.q_norm.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.q_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.q_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.v_proj.biases": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.v_proj.scales": "model-00002-of-00047.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00002-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.e_score_correction_bias": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.gate.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.gate.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.gate.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.down_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.down_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.down_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.up_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.up_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.block_sparse_moe.switch_mlp.up_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.k_norm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.k_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.k_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.o_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.o_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.q_norm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.q_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.q_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.v_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.v_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.e_score_correction_bias": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.gate.biases": "model-00016-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.gate.scales": "model-00016-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.gate.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.down_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.down_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.down_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.up_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.up_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.21.block_sparse_moe.switch_mlp.up_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00017-of-00047.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00017-of-00047.safetensors",
+ "model.layers.21.self_attn.k_norm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.k_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.k_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.o_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.o_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.q_norm.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.q_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.q_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.v_proj.biases": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.v_proj.scales": "model-00016-of-00047.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00016-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.e_score_correction_bias": "model-00018-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.gate.biases": "model-00017-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.gate.scales": "model-00017-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.gate.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.down_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.down_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.down_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.up_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.up_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.22.block_sparse_moe.switch_mlp.up_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00018-of-00047.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00018-of-00047.safetensors",
+ "model.layers.22.self_attn.k_norm.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.k_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.k_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.o_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.o_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.q_norm.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.q_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.q_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.v_proj.biases": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.v_proj.scales": "model-00017-of-00047.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00017-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.e_score_correction_bias": "model-00019-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.gate.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.gate.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.gate.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.down_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.down_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.down_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.up_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.up_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.block_sparse_moe.switch_mlp.up_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.23.self_attn.k_norm.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.k_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.k_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.o_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.o_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.q_norm.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.q_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.q_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.v_proj.biases": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.v_proj.scales": "model-00018-of-00047.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00018-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.e_score_correction_bias": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.gate.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.gate.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.gate.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.down_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.down_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.down_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.up_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.up_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.block_sparse_moe.switch_mlp.up_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.k_norm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.k_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.k_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.o_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.o_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.q_norm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.q_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.q_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.v_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.v_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.e_score_correction_bias": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.gate.biases": "model-00019-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.gate.scales": "model-00019-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.gate.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.down_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.down_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.down_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.up_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.up_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.25.block_sparse_moe.switch_mlp.up_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00020-of-00047.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00020-of-00047.safetensors",
+ "model.layers.25.self_attn.k_norm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.k_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.k_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.o_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.o_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.q_norm.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.q_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.q_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.v_proj.biases": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.v_proj.scales": "model-00019-of-00047.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00019-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.e_score_correction_bias": "model-00021-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.gate.biases": "model-00020-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.gate.scales": "model-00020-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.gate.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.down_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.down_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.down_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.up_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.up_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.26.block_sparse_moe.switch_mlp.up_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00021-of-00047.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00021-of-00047.safetensors",
+ "model.layers.26.self_attn.k_norm.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.k_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.k_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.o_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.o_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.q_norm.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.q_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.q_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.v_proj.biases": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.v_proj.scales": "model-00020-of-00047.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00020-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.e_score_correction_bias": "model-00022-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.gate.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.gate.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.gate.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.down_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.down_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.down_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.up_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.up_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.block_sparse_moe.switch_mlp.up_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.27.self_attn.k_norm.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.k_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.k_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.o_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.o_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.q_norm.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.q_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.q_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.v_proj.biases": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.v_proj.scales": "model-00021-of-00047.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00021-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.e_score_correction_bias": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.gate.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.gate.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.gate.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.down_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.down_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.down_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.up_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.up_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.block_sparse_moe.switch_mlp.up_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.k_norm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.k_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.k_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.o_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.o_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.q_norm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.q_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.q_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.v_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.v_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.e_score_correction_bias": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.gate.biases": "model-00022-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.gate.scales": "model-00022-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.gate.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.down_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.down_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.down_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.up_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.up_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.29.block_sparse_moe.switch_mlp.up_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00023-of-00047.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00023-of-00047.safetensors",
+ "model.layers.29.self_attn.k_norm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.k_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.k_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.o_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.o_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.q_norm.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.q_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.q_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.v_proj.biases": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.v_proj.scales": "model-00022-of-00047.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00022-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.e_score_correction_bias": "model-00004-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.gate.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.gate.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.gate.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.down_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.down_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.down_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.up_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.up_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.block_sparse_moe.switch_mlp.up_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.3.self_attn.k_norm.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.k_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.k_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.o_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.o_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.q_norm.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.q_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.q_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.v_proj.biases": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.v_proj.scales": "model-00003-of-00047.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00003-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.e_score_correction_bias": "model-00024-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.gate.biases": "model-00023-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.gate.scales": "model-00023-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.gate.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.down_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.down_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.down_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.up_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.up_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.30.block_sparse_moe.switch_mlp.up_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00024-of-00047.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00024-of-00047.safetensors",
+ "model.layers.30.self_attn.k_norm.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.k_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.k_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.o_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.o_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.q_norm.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.q_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.q_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.v_proj.biases": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.v_proj.scales": "model-00023-of-00047.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00023-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.e_score_correction_bias": "model-00025-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.gate.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.gate.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.gate.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.down_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.down_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.down_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.up_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.up_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.block_sparse_moe.switch_mlp.up_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.31.self_attn.k_norm.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.k_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.k_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.o_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.o_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.q_norm.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.q_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.q_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.v_proj.biases": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.v_proj.scales": "model-00024-of-00047.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00024-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.e_score_correction_bias": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.gate.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.gate.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.gate.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.down_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.down_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.down_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.up_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.up_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.block_sparse_moe.switch_mlp.up_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.k_norm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.k_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.k_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.o_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.o_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.q_norm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.q_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.q_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.v_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.v_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.e_score_correction_bias": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.gate.biases": "model-00025-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.gate.scales": "model-00025-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.gate.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.down_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.down_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.down_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.up_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.up_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.33.block_sparse_moe.switch_mlp.up_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00026-of-00047.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00026-of-00047.safetensors",
+ "model.layers.33.self_attn.k_norm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.k_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.k_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.o_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.o_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.q_norm.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.q_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.q_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.v_proj.biases": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.v_proj.scales": "model-00025-of-00047.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00025-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.e_score_correction_bias": "model-00027-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.gate.biases": "model-00026-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.gate.scales": "model-00026-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.gate.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.down_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.down_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.down_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.up_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.up_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.34.block_sparse_moe.switch_mlp.up_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00027-of-00047.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00027-of-00047.safetensors",
+ "model.layers.34.self_attn.k_norm.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.k_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.k_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.o_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.o_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.q_norm.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.q_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.q_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.v_proj.biases": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.v_proj.scales": "model-00026-of-00047.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00026-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.e_score_correction_bias": "model-00028-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.gate.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.gate.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.gate.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.down_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.down_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.down_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.up_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.up_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.block_sparse_moe.switch_mlp.up_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.35.self_attn.k_norm.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.k_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.k_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.o_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.o_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.q_norm.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.q_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.q_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.v_proj.biases": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.v_proj.scales": "model-00027-of-00047.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00027-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.e_score_correction_bias": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.gate.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.gate.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.gate.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.down_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.down_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.down_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.up_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.up_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.block_sparse_moe.switch_mlp.up_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.k_norm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.k_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.k_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.o_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.o_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.q_norm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.q_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.q_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.v_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.v_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.e_score_correction_bias": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.gate.biases": "model-00028-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.gate.scales": "model-00028-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.gate.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.down_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.down_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.down_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.up_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.up_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.37.block_sparse_moe.switch_mlp.up_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00029-of-00047.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00029-of-00047.safetensors",
+ "model.layers.37.self_attn.k_norm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.k_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.k_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.o_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.o_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.q_norm.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.q_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.q_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.v_proj.biases": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.v_proj.scales": "model-00028-of-00047.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00028-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.e_score_correction_bias": "model-00030-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.gate.biases": "model-00029-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.gate.scales": "model-00029-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.gate.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.down_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.down_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.down_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.up_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.up_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.38.block_sparse_moe.switch_mlp.up_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00030-of-00047.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00030-of-00047.safetensors",
+ "model.layers.38.self_attn.k_norm.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.k_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.k_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.o_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.o_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.q_norm.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.q_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.q_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.v_proj.biases": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.v_proj.scales": "model-00029-of-00047.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00029-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.e_score_correction_bias": "model-00031-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.gate.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.gate.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.gate.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.down_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.down_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.down_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.up_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.up_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.block_sparse_moe.switch_mlp.up_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.39.self_attn.k_norm.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.k_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.k_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.o_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.o_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.q_norm.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.q_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.q_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.v_proj.biases": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.v_proj.scales": "model-00030-of-00047.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00030-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.e_score_correction_bias": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.gate.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.gate.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.gate.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.down_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.down_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.down_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.up_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.up_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.block_sparse_moe.switch_mlp.up_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.k_norm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.k_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.k_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.o_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.o_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.q_norm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.q_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.q_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.v_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.v_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.e_score_correction_bias": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.gate.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.gate.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.gate.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.down_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.down_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.down_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.up_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.up_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.block_sparse_moe.switch_mlp.up_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.input_layernorm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.post_attention_layernorm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.k_norm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.k_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.k_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.k_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.o_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.o_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.o_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.q_norm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.q_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.q_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.q_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.v_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.v_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.40.self_attn.v_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.e_score_correction_bias": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.gate.biases": "model-00031-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.gate.scales": "model-00031-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.gate.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.down_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.down_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.down_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.up_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.up_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.41.block_sparse_moe.switch_mlp.up_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.41.input_layernorm.weight": "model-00032-of-00047.safetensors",
+ "model.layers.41.post_attention_layernorm.weight": "model-00032-of-00047.safetensors",
+ "model.layers.41.self_attn.k_norm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.k_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.k_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.k_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.o_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.o_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.o_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.q_norm.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.q_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.q_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.q_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.v_proj.biases": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.v_proj.scales": "model-00031-of-00047.safetensors",
+ "model.layers.41.self_attn.v_proj.weight": "model-00031-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.e_score_correction_bias": "model-00033-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.gate.biases": "model-00032-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.gate.scales": "model-00032-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.gate.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.down_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.down_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.down_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.up_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.up_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.42.block_sparse_moe.switch_mlp.up_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.42.input_layernorm.weight": "model-00033-of-00047.safetensors",
+ "model.layers.42.post_attention_layernorm.weight": "model-00033-of-00047.safetensors",
+ "model.layers.42.self_attn.k_norm.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.k_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.k_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.k_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.o_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.o_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.o_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.q_norm.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.q_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.q_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.q_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.v_proj.biases": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.v_proj.scales": "model-00032-of-00047.safetensors",
+ "model.layers.42.self_attn.v_proj.weight": "model-00032-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.e_score_correction_bias": "model-00034-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.gate.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.gate.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.gate.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.down_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.down_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.down_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.up_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.up_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.block_sparse_moe.switch_mlp.up_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.input_layernorm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.43.post_attention_layernorm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.43.self_attn.k_norm.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.k_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.k_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.k_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.o_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.o_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.o_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.q_norm.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.q_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.q_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.q_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.v_proj.biases": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.v_proj.scales": "model-00033-of-00047.safetensors",
+ "model.layers.43.self_attn.v_proj.weight": "model-00033-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.e_score_correction_bias": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.gate.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.gate.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.gate.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.down_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.down_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.down_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.up_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.up_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.block_sparse_moe.switch_mlp.up_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.input_layernorm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.post_attention_layernorm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.k_norm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.k_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.k_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.k_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.o_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.o_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.o_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.q_norm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.q_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.q_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.q_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.v_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.v_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.44.self_attn.v_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.e_score_correction_bias": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.gate.biases": "model-00034-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.gate.scales": "model-00034-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.gate.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.down_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.down_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.down_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.up_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.up_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.45.block_sparse_moe.switch_mlp.up_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.45.input_layernorm.weight": "model-00035-of-00047.safetensors",
+ "model.layers.45.post_attention_layernorm.weight": "model-00035-of-00047.safetensors",
+ "model.layers.45.self_attn.k_norm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.k_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.k_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.k_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.o_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.o_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.o_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.q_norm.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.q_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.q_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.q_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.v_proj.biases": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.v_proj.scales": "model-00034-of-00047.safetensors",
+ "model.layers.45.self_attn.v_proj.weight": "model-00034-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.e_score_correction_bias": "model-00036-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.gate.biases": "model-00035-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.gate.scales": "model-00035-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.gate.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.down_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.down_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.down_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.up_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.up_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.46.block_sparse_moe.switch_mlp.up_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.46.input_layernorm.weight": "model-00036-of-00047.safetensors",
+ "model.layers.46.post_attention_layernorm.weight": "model-00036-of-00047.safetensors",
+ "model.layers.46.self_attn.k_norm.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.k_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.k_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.k_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.o_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.o_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.o_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.q_norm.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.q_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.q_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.q_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.v_proj.biases": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.v_proj.scales": "model-00035-of-00047.safetensors",
+ "model.layers.46.self_attn.v_proj.weight": "model-00035-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.e_score_correction_bias": "model-00037-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.gate.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.gate.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.gate.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.down_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.down_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.down_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.up_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.up_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.block_sparse_moe.switch_mlp.up_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.input_layernorm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.47.post_attention_layernorm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.47.self_attn.k_norm.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.k_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.k_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.k_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.o_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.o_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.o_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.q_norm.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.q_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.q_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.q_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.v_proj.biases": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.v_proj.scales": "model-00036-of-00047.safetensors",
+ "model.layers.47.self_attn.v_proj.weight": "model-00036-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.e_score_correction_bias": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.gate.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.gate.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.gate.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.down_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.down_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.down_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.up_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.up_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.block_sparse_moe.switch_mlp.up_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.input_layernorm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.post_attention_layernorm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.k_norm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.k_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.k_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.k_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.o_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.o_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.o_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.q_norm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.q_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.q_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.q_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.v_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.v_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.48.self_attn.v_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.e_score_correction_bias": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.gate.biases": "model-00037-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.gate.scales": "model-00037-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.gate.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.down_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.down_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.down_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.up_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.up_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.49.block_sparse_moe.switch_mlp.up_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.49.input_layernorm.weight": "model-00038-of-00047.safetensors",
+ "model.layers.49.post_attention_layernorm.weight": "model-00038-of-00047.safetensors",
+ "model.layers.49.self_attn.k_norm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.k_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.k_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.k_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.o_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.o_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.o_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.q_norm.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.q_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.q_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.q_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.v_proj.biases": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.v_proj.scales": "model-00037-of-00047.safetensors",
+ "model.layers.49.self_attn.v_proj.weight": "model-00037-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.e_score_correction_bias": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.gate.biases": "model-00004-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.gate.scales": "model-00004-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.gate.weight": "model-00004-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.down_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.down_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.down_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.up_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.up_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.5.block_sparse_moe.switch_mlp.up_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00005-of-00047.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00005-of-00047.safetensors",
+ "model.layers.5.self_attn.k_norm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.k_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.k_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.o_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.o_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.q_norm.weight": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.q_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.q_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.v_proj.biases": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.v_proj.scales": "model-00004-of-00047.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00004-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.e_score_correction_bias": "model-00039-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.gate.biases": "model-00038-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.gate.scales": "model-00038-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.gate.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.down_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.down_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.down_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.up_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.up_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.50.block_sparse_moe.switch_mlp.up_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.50.input_layernorm.weight": "model-00039-of-00047.safetensors",
+ "model.layers.50.post_attention_layernorm.weight": "model-00039-of-00047.safetensors",
+ "model.layers.50.self_attn.k_norm.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.k_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.k_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.k_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.o_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.o_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.o_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.q_norm.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.q_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.q_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.q_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.v_proj.biases": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.v_proj.scales": "model-00038-of-00047.safetensors",
+ "model.layers.50.self_attn.v_proj.weight": "model-00038-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.e_score_correction_bias": "model-00040-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.gate.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.gate.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.gate.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.down_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.down_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.down_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.up_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.up_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.block_sparse_moe.switch_mlp.up_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.input_layernorm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.51.post_attention_layernorm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.51.self_attn.k_norm.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.k_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.k_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.k_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.o_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.o_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.o_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.q_norm.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.q_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.q_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.q_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.v_proj.biases": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.v_proj.scales": "model-00039-of-00047.safetensors",
+ "model.layers.51.self_attn.v_proj.weight": "model-00039-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.e_score_correction_bias": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.gate.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.gate.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.gate.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.down_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.down_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.down_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.up_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.up_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.block_sparse_moe.switch_mlp.up_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.input_layernorm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.post_attention_layernorm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.k_norm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.k_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.k_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.k_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.o_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.o_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.o_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.q_norm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.q_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.q_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.q_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.v_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.v_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.52.self_attn.v_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.e_score_correction_bias": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.gate.biases": "model-00040-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.gate.scales": "model-00040-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.gate.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.down_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.down_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.down_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.up_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.up_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.53.block_sparse_moe.switch_mlp.up_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.53.input_layernorm.weight": "model-00041-of-00047.safetensors",
+ "model.layers.53.post_attention_layernorm.weight": "model-00041-of-00047.safetensors",
+ "model.layers.53.self_attn.k_norm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.k_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.k_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.k_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.o_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.o_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.o_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.q_norm.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.q_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.q_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.q_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.v_proj.biases": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.v_proj.scales": "model-00040-of-00047.safetensors",
+ "model.layers.53.self_attn.v_proj.weight": "model-00040-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.e_score_correction_bias": "model-00042-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.gate.biases": "model-00041-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.gate.scales": "model-00041-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.gate.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.down_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.down_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.down_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.up_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.up_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.54.block_sparse_moe.switch_mlp.up_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.54.input_layernorm.weight": "model-00042-of-00047.safetensors",
+ "model.layers.54.post_attention_layernorm.weight": "model-00042-of-00047.safetensors",
+ "model.layers.54.self_attn.k_norm.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.k_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.k_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.k_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.o_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.o_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.o_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.q_norm.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.q_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.q_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.q_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.v_proj.biases": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.v_proj.scales": "model-00041-of-00047.safetensors",
+ "model.layers.54.self_attn.v_proj.weight": "model-00041-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.e_score_correction_bias": "model-00043-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.gate.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.gate.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.gate.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.down_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.down_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.down_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.up_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.up_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.block_sparse_moe.switch_mlp.up_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.input_layernorm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.55.post_attention_layernorm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.55.self_attn.k_norm.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.k_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.k_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.k_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.o_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.o_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.o_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.q_norm.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.q_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.q_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.q_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.v_proj.biases": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.v_proj.scales": "model-00042-of-00047.safetensors",
+ "model.layers.55.self_attn.v_proj.weight": "model-00042-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.e_score_correction_bias": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.gate.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.gate.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.gate.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.down_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.down_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.down_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.up_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.up_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.block_sparse_moe.switch_mlp.up_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.input_layernorm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.post_attention_layernorm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.k_norm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.k_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.k_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.k_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.o_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.o_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.o_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.q_norm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.q_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.q_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.q_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.v_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.v_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.56.self_attn.v_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.e_score_correction_bias": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.gate.biases": "model-00043-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.gate.scales": "model-00043-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.gate.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.down_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.down_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.down_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.up_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.up_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.57.block_sparse_moe.switch_mlp.up_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.57.input_layernorm.weight": "model-00044-of-00047.safetensors",
+ "model.layers.57.post_attention_layernorm.weight": "model-00044-of-00047.safetensors",
+ "model.layers.57.self_attn.k_norm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.k_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.k_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.k_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.o_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.o_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.o_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.q_norm.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.q_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.q_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.q_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.v_proj.biases": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.v_proj.scales": "model-00043-of-00047.safetensors",
+ "model.layers.57.self_attn.v_proj.weight": "model-00043-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.e_score_correction_bias": "model-00045-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.gate.biases": "model-00044-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.gate.scales": "model-00044-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.gate.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.down_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.down_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.down_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.up_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.up_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.58.block_sparse_moe.switch_mlp.up_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.58.input_layernorm.weight": "model-00045-of-00047.safetensors",
+ "model.layers.58.post_attention_layernorm.weight": "model-00045-of-00047.safetensors",
+ "model.layers.58.self_attn.k_norm.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.k_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.k_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.k_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.o_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.o_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.o_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.q_norm.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.q_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.q_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.q_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.v_proj.biases": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.v_proj.scales": "model-00044-of-00047.safetensors",
+ "model.layers.58.self_attn.v_proj.weight": "model-00044-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.e_score_correction_bias": "model-00046-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.gate.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.gate.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.gate.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.down_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.down_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.down_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.up_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.up_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.block_sparse_moe.switch_mlp.up_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.input_layernorm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.59.post_attention_layernorm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.59.self_attn.k_norm.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.k_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.k_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.k_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.o_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.o_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.o_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.q_norm.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.q_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.q_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.q_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.v_proj.biases": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.v_proj.scales": "model-00045-of-00047.safetensors",
+ "model.layers.59.self_attn.v_proj.weight": "model-00045-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.e_score_correction_bias": "model-00006-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.gate.biases": "model-00005-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.gate.scales": "model-00005-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.gate.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.down_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.down_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.down_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.up_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.up_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.6.block_sparse_moe.switch_mlp.up_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00006-of-00047.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00006-of-00047.safetensors",
+ "model.layers.6.self_attn.k_norm.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.k_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.k_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.o_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.o_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.q_norm.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.q_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.q_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.v_proj.biases": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.v_proj.scales": "model-00005-of-00047.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00005-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.e_score_correction_bias": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.gate.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.gate.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.gate.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.down_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.down_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.down_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.up_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.up_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.block_sparse_moe.switch_mlp.up_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.input_layernorm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.post_attention_layernorm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.k_norm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.k_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.k_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.k_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.o_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.o_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.o_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.q_norm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.q_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.q_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.q_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.v_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.v_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.60.self_attn.v_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.e_score_correction_bias": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.gate.biases": "model-00046-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.gate.scales": "model-00046-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.gate.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.down_proj.biases": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.down_proj.scales": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.down_proj.weight": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.up_proj.biases": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.up_proj.scales": "model-00047-of-00047.safetensors",
+ "model.layers.61.block_sparse_moe.switch_mlp.up_proj.weight": "model-00047-of-00047.safetensors",
+ "model.layers.61.input_layernorm.weight": "model-00047-of-00047.safetensors",
+ "model.layers.61.post_attention_layernorm.weight": "model-00047-of-00047.safetensors",
+ "model.layers.61.self_attn.k_norm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.k_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.k_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.k_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.o_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.o_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.o_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.q_norm.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.q_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.q_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.q_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.v_proj.biases": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.v_proj.scales": "model-00046-of-00047.safetensors",
+ "model.layers.61.self_attn.v_proj.weight": "model-00046-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.e_score_correction_bias": "model-00007-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.gate.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.gate.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.gate.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.down_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.down_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.down_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.up_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.up_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.block_sparse_moe.switch_mlp.up_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.7.self_attn.k_norm.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.k_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.k_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.o_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.o_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.q_norm.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.q_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.q_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.v_proj.biases": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.v_proj.scales": "model-00006-of-00047.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00006-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.e_score_correction_bias": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.gate.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.gate.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.gate.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.down_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.down_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.down_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.up_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.up_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.block_sparse_moe.switch_mlp.up_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.k_norm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.k_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.k_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.o_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.o_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.q_norm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.q_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.q_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.v_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.v_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.e_score_correction_bias": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.gate.biases": "model-00007-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.gate.scales": "model-00007-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.gate.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.down_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.down_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.down_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.gate_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.gate_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.gate_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.up_proj.biases": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.up_proj.scales": "model-00008-of-00047.safetensors",
+ "model.layers.9.block_sparse_moe.switch_mlp.up_proj.weight": "model-00008-of-00047.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00008-of-00047.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00008-of-00047.safetensors",
+ "model.layers.9.self_attn.k_norm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.k_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.k_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.o_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.o_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.q_norm.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.q_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.q_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.v_proj.biases": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.v_proj.scales": "model-00007-of-00047.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00007-of-00047.safetensors",
+ "model.norm.weight": "model-00047-of-00047.safetensors"
+ }
+}
\ No newline at end of file
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..27ba950155a5ce69f8f0d6a733447616fd552b9b
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b81e5e5cba2b169e86a0771825a927e9d41b4c4484ded4a286410f41f702f17
+size 15523144
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..82e3586036b21d16557ab22e2ddced8579e025f5
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": "]~!b[",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "[e~[",
+ "extra_special_tokens": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "]<]speech[>[",
+ "]<]image[>[",
+ "]<]video[>[",
+ "]<]start of speech[>[",
+ "]<]end of speech[>[",
+ "]<]start of image[>[",
+ "]<]end of image[>[",
+ "]<]start of video[>[",
+ "]<]end of video[>[",
+ "]<]vision pad[>[",
+ "]~!b[",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "[e~[",
+ "]!d~[",
+ "]!p~[",
+ "]~b]",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "is_local": true,
+ "model_max_length": 40960000,
+ "tokenizer_class": "TokenizersBackend",
+ "tool_parser_type": "minimax_m2",
+ "unk_token": "]!d~[",
+ "chat_template": "{# ----------\u2011\u2011\u2011 special token variables \u2011\u2011\u2011---------- #}\n{%- set toolcall_begin_token = '' -%}\n{%- set toolcall_end_token = '' -%}\n{#- Tool Rendering Functions ============================================== -#}\n{%- macro render_tool_namespace(namespace_name, tool_list) -%}\n{%- for tool in tool_list -%}\n{{ tool.function | tojson(ensure_ascii=False) }}\n{% endfor -%}\n{%- endmacro -%}\n{%- macro visible_text(content) -%}\n {%- if content is string -%}\n {{ content }}\n {%- elif content is iterable and content is not mapping -%}\n {%- for item in content -%}\n {%- if item is mapping and item.type == 'text' -%}\n {{- item.text }}\n {%- elif item is string -%}\n {{- item }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{- content }}\n {%- endif -%}\n{%- endmacro -%}\n{#- System Message Construction ============================================ -#}\n{%- macro build_system_message(system_message) -%}\n {%- if system_message and system_message.content -%}\n {{- visible_text(system_message.content) }}\n {%- else -%}\n {%- if model_identity is not defined -%}\n {%- set model_identity = \"You are a helpful assistant. 
Your name is MiniMax-M2.5 and is built by MiniMax.\" -%}\n {%- endif -%}\n {{- model_identity }}\n {%- endif -%}\n \n {#- Handle current_date -#}\n {%- if system_message and system_message.current_date -%}\n {{- '\\n' ~ 'Current date: ' + system_message.current_date }}\n {%- endif -%}\n {#- Handle current_location -#}\n {%- if system_message and system_message.current_location -%}\n {{- '\\n' ~ 'Current location: ' + system_message.current_location }}\n {%- endif -%}\n{%- endmacro -%}\n{#- Main Template Logic ================================================= -#}\n{#- Extract system message (only first message if it's system) -#}\n{%- set system_message = none -%}\n{%- set conversation_messages = messages -%}\n{%- if messages and messages[0].role == \"system\" -%}\n {%- set system_message = messages[0] -%}\n {%- set conversation_messages = messages[1:] -%}\n{%- endif -%}\n{#- Get the last user message turn, for interleved thinking -#}\n{%- set ns = namespace(last_user_index=-1) %}\n{% for m in conversation_messages %}\n {%- if m.role == 'user' %}\n {% set ns.last_user_index = loop.index0 -%}\n {%- endif %}\n{%- endfor %}\n{#- Render system message -#}\n{{- ']~!b[' ~ ']~b]system' ~ '\\n' }}\n{{- build_system_message(system_message) }}\n{#- Render tools if available -#}\n{%- if tools -%}\n {{- '\\n\\n' ~ '# Tools' ~ '\\n' ~ 'You may call one or more tools to assist with the user query.\\nHere are the tools available in JSONSchema format:' ~ '\\n' }}\n {{- '\\n' ~ '' ~ '\\n' }}\n {{- render_tool_namespace(\"functions\", tools) }}\n {{- '' ~ '\\n\\n' }}\n{{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\\n' }}\n{{- '\\n' ~ toolcall_begin_token }}\n\nparam-value-1\nparam-value-2\n...\n\n{{- '\\n' ~ toolcall_end_token }}\n{%- endif -%}\n{{- '[e~[\\n' }}\n\n{#- Render messages -#}\n{%- set last_tool_call = namespace(name=none) -%}\n{%- for message in conversation_messages -%}\n {%- if message.role == 'assistant' -%}\n {#- Only render 
reasoning_content if no user message follows -#}\n {{- ']~b]ai' ~ '\\n' }}\n\n {%- set reasoning_content = '' %}\n {%- set content = visible_text(message.content) %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '' in content %}\n {%- set reasoning_content = content.split('')[0].strip('\\n').split('')[-1].strip('\\n') %}\n {%- set content = content.split('')[-1].strip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if reasoning_content and loop.index0 > ns.last_user_index -%}\n {{- '' ~ '\\n' ~ reasoning_content ~ '\\n' ~ '' ~ '\\n\\n' }}\n {%- endif -%}\n {%- if content -%}\n {{- content }}\n {%- endif -%}\n {%- if message.tool_calls -%}\n {{- '\\n' ~ toolcall_begin_token ~ '\\n' }}\n\n {%- for tool_call in message.tool_calls -%}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '' }}\n {% set _args = tool_call.arguments %}\n {%- for k, v in _args.items() %}\n {{- '' }}\n {{- v | tojson(ensure_ascii=False) if v is not string else v }}\n {{- '' }}\n {% endfor %}\n {{- '' ~ '\\n' }}\n {%- endfor -%}\n \n {{- toolcall_end_token}}\n {%- set last_tool_call.name = message.tool_calls[-1].name -%}\n {%- else -%}\n {%- set last_tool_call.name = none -%}\n {%- endif -%}\n {{- '[e~[' ~ '\\n' }}\n \n {%- elif message.role == 'tool' -%}\n {%- if last_tool_call.name is none -%}\n {{- raise_exception(\"Message has tool role, but there was no previous assistant message with a tool call!\") }}\n {%- endif -%}\n {%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}\n {{- ']~b]tool' }}\n {%- endif -%}\n {%- if message.content is string -%}\n {{- '\\n' }}\n {{- message.content }}\n {{- '' }}\n {%- else -%}\n {%- for tr in message.content -%}\n {{- '\\n' }}\n {{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}\n {{- '\\n' }}\n {%- endfor -%}\n {%- endif -%}\n {%- if loop.last 
or (conversation_messages[loop.index0 + 1].role != 'tool') -%}\n {{- '[e~[\\n' -}}\n {%- endif -%}\n \n {%- elif message.role == 'user' -%}\n {{- ']~b]user' ~ '\\n' }}\n {{- visible_text(message.content) }}\n {{- '[e~[' ~ '\\n' }}\n {%- endif -%}\n{%- endfor -%}\n\n{#- Generation prompt -#}\n{%- if add_generation_prompt -%}\n{{- ']~b]ai' ~ '\\n' ~ '' ~ '\\n' }}\n{%- endif -%}\n"
+}
\ No newline at end of file