Upload 11 files

- .gitattributes +1 -0
- MonoidForCausalLM.py +792 -0
- README.md +254 -3
- chat_template.jinja +93 -0
- config.json +27 -0
- generation_config.json +9 -0
- model.safetensors +3 -0
- monoid_scan_cuda.py +411 -0
- special_tokens_map.json +23 -0
- tokenizer.json +3 -0
- tokenizer_config.json +2063 -0
- training_args.bin +3 -0
.gitattributes
CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
MonoidForCausalLM.py
ADDED

@@ -0,0 +1,792 @@
"""
MonoidForCausalLM — Causal Monoid Language Model (HuggingFace compatible)

Architecture:
    Replace softmax attention with a monoid parallel-scan recurrence.

Core idea:
    Softmax attention computes o_t = Σ_{i≤t} softmax(q_t·k_i) v_i,
    which requires an O(T) KV-cache per layer at inference.

    Monoid attention compresses the entire causal history into a
    fixed-size state matrix S_t ∈ ℝ^{d×d} per head:
        S_t = α_t · S_{t-1} + k_t ⊗ v_t   (explicit causal recurrence)
        o_t = q_t · S_t                   (state readout)

    This is a monoid because the binary operator
        (log_α, S) ⊕ (log_β, X) = (log_α + log_β, exp(log_β)·S + X)
    is associative, which enables a parallel prefix scan for training
    and an O(1) sequential update for inference.

Key properties:
    ✓ Explicit causal modeling — the α_t gate explicitly controls how fast
      past information decays, making causality a first-class citizen
      rather than a constraint imposed by masking.

    ✓ Monoid state compression — the full causal prefix x_{1:t} is
      lossily compressed into a fixed-size (d×d) state matrix per head.
      No O(T) KV-cache is needed; inference is O(1) per token per layer.

    ✓ Parallel training — associativity of ⊕ enables an O(T) parallel
      prefix scan (vs O(T²) for softmax attention).

Reuses LlamaMLP + LlamaRMSNorm from HuggingFace Transformers.
"""

from __future__ import annotations

import torch
import torch.nn as nn
from torch import Tensor

from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    GenerationMixin,
    PretrainedConfig,
    PreTrainedModel,
)
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import LlamaMLP, LlamaRMSNorm

from monoid_scan_cuda import parallel_scan, parallel_scan_with_state


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Config
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class MonoidConfig(PretrainedConfig):
    """
    Configuration for the Monoid causal language model.

    Mirrors LlamaConfig for the shared components (MLP, RMSNorm, embedding)
    so that weights can be transferred directly from Llama checkpoints.
    """
    model_type = "monoid"

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 576,
        intermediate_size: int = 1536,
        num_hidden_layers: int = 30,
        num_attention_heads: int = 9,
        head_dim: int = 64,
        max_position_embeddings: int = 2048,
        rms_norm_eps: float = 1e-5,
        hidden_act: str = "silu",
        mlp_bias: bool = False,
        attention_bias: bool = False,
        tie_word_embeddings: bool = True,
        initializer_range: float = 0.041666666666666664,
        pad_token_id: int | None = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.hidden_act = hidden_act
        self.mlp_bias = mlp_bias
        self.attention_bias = attention_bias
        self.initializer_range = initializer_range


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Monoid Cache — O(1) state replaces the O(T) KV-cache
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class MonoidCache:
    """
    Per-layer monoid state cache for autoregressive inference.

    Unlike a Transformer KV-cache, which stores all past keys and values
    (O(T) memory), each layer here stores exactly ONE state tuple:
        (log_decay_acc, S)  where  S ∈ ℝ^{B, H, d, d}
    This is the monoid "sum" of all past (log_α_i, k_i ⊗ v_i) under ⊕.
    Memory is O(1) per layer regardless of sequence length.
    """

    def __init__(self):
        self.states: list[tuple[Tensor, Tensor] | None] = []
        self.seen_tokens: int = 0

    def get_seq_length(self, layer_idx: int = 0) -> int:
        return self.seen_tokens

    def update(self, layer_idx: int, state: tuple[Tensor, Tensor]):
        """Store the accumulated monoid state for a given layer."""
        while len(self.states) <= layer_idx:
            self.states.append(None)
        self.states[layer_idx] = state

    def get_state(self, layer_idx: int) -> tuple[Tensor, Tensor] | None:
        """Retrieve the accumulated monoid state for a given layer."""
        if layer_idx < len(self.states):
            return self.states[layer_idx]
        return None

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorder the cached states for beam search."""
        for i, state in enumerate(self.states):
            if state is not None:
                log_d, kv = state
                self.states[i] = (log_d[beam_idx], kv[beam_idx])


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Monoid operator — the algebraic heart
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

def monoid_op(
    a: tuple[Tensor, Tensor],
    b: tuple[Tensor, Tensor],
) -> tuple[Tensor, Tensor]:
    """
    The monoid binary operator ⊕ on (log-space decay, state matrix) pairs.

    Definition:
        (log_α, S) ⊕ (log_β, X) = (log_α + log_β, exp(log_β)·S + X)

    Why this is a monoid:
      • Associativity: (a ⊕ b) ⊕ c = a ⊕ (b ⊕ c).
        This enables a parallel prefix scan for training (reduce tree)
        and an O(1) left fold for inference (sequential append).
      • Identity: e = (0, 0), so e ⊕ a = a ⊕ e = a.

    Why log-space:
        Keeping the decay factor in log-space avoids numerical underflow
        when α^T → 0 for long sequences.

    Causal semantics:
        S_t = α_t · S_{t-1} + k_t ⊗ v_t
        The decay α_t ∈ (0,1) explicitly controls how much of the past the
        model retains. This is *explicit causal modeling* — the model must
        learn to balance retention vs novelty at every timestep.
    """
    log_a, kv_a = a
    log_b, kv_b = b

    new_log = log_a + log_b           # log(α·β) = log_α + log_β
    decay_b = torch.exp(log_b)        # β = exp(log_β)
    while decay_b.dim() < kv_a.dim():
        decay_b = decay_b.unsqueeze(-1)  # broadcast to [B,H,...,1,1]

    return new_log, kv_a * decay_b + kv_b  # β·S + X


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Monoid attention — the core innovation
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class MonoidAttention(nn.Module):
    """
    Monoid causal attention — replaces softmax attention entirely.

    Key differences from standard attention:
      ✗ No RoPE / positional encoding — position is implicitly encoded by
        the causal decay gate α_t. The model learns *when* to forget
        rather than encoding *where* tokens are.
      ✗ No KV-cache — replaced by a MonoidCache with O(1) state per layer.
        Each state S ∈ ℝ^{H×d×d} is a compressed summary of ALL past tokens.
      ✗ No attention mask — causality is built into the recurrence itself:
        S_t depends only on S_{t-1} and the current token by construction.

    Computation:
        Training (parallel scan, O(T)):
            k_t = SiLU(k_proj(x_t))           # non-negative keys for a PSD state
            S_t = α_t · S_{t-1} + k_t ⊗ v_t   # monoid recurrence via prefix scan
            o_t = q_t · S_t                   # linear readout from the state

        Inference (RNN mode, O(1) per token):
            The same recurrence, applied one token at a time.
    """

    def __init__(self, config: MonoidConfig, layer_idx: int):
        super().__init__()
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = config.head_dim
        self.scaling = self.head_dim ** -0.5  # 1/√d, scale factor for the q·S readout

        # --- Projections (transferred from Llama) ---
        # q_proj, o_proj: dims identical to Llama, copied directly.
        # k_proj, v_proj: Llama GQA has fewer KV heads; we tile to full heads.
        self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)

        # --- Decay gate (novel component, randomly initialized) ---
        # Projects hidden_size → num_heads, yielding one scalar α per head.
        # After the sigmoid, α_t ∈ (0,1) controls the per-head forgetting rate.
        # This is the key to *explicit causal modeling*: the model learns a
        # content-dependent decay, not a fixed positional bias.
        self.decay_proj = nn.Linear(config.hidden_size, self.num_heads, bias=True)

        # --- QK-Norm (novel component, randomly initialized) ---
        # Stabilizes the scale of the q·S readout. Without it, the state
        # matrix S (a sum of outer products) can grow without bound.
        self.q_norm = LlamaRMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = LlamaRMSNorm(self.head_dim, eps=config.rms_norm_eps)

        # --- Learnable initial state h0 (novel component, zero-initialized) ---
        # S_0 = h0 ∈ ℝ^{1, H, d, d}, shared across the batch.
        # Zero-init means the model starts with "no memory" — a clean slate.
        # It can learn a non-zero h0 as a kind of "system prompt" state.
        self.h0 = nn.Parameter(torch.zeros(1, self.num_heads, self.head_dim, self.head_dim))

    def forward(
        self,
        hidden_states: Tensor,
        monoid_cache: MonoidCache | None = None,
        use_cache: bool = False,
    ) -> tuple[Tensor, tuple[Tensor, Tensor] | None]:
        """
        Args:
            hidden_states: [B, T, hidden_size]
            monoid_cache: O(1) state cache for inference
            use_cache: whether to use/update the cache

        Returns:
            output: [B, T, hidden_size]
            final_state: (log_decay_acc, S) or None
        """
        B, T, _ = hidden_states.shape
        H, d = self.num_heads, self.head_dim

        # --- Project to multi-head Q, K, V ---
        q = self.q_proj(hidden_states).view(B, T, H, d).transpose(1, 2)  # [B,H,T,d]
        k = self.k_proj(hidden_states).view(B, T, H, d).transpose(1, 2)
        v = self.v_proj(hidden_states).view(B, T, H, d).transpose(1, 2)

        # --- QK-Norm: stabilize the q·S readout scale ---
        q = self.q_norm(q) * self.scaling
        k = self.k_norm(k)

        # --- Non-negative keys via SiLU ---
        # The state S = Σ α^{t-i} k_i ⊗ v_i is a sum of outer products.
        # Non-negative k keeps S positive semi-definite (PSD), preventing
        # "feature erasure" where one token's contribution cancels another's.
        # PSD guarantees monotonic information accumulation.
        k = torch.nn.functional.silu(k)

        # --- Compute the per-head decay gate α_t ---
        # sigmoid ensures α ∈ (0,1); log-space keeps it numerically stable.
        alpha = torch.sigmoid(self.decay_proj(hidden_states))  # [B,T,H]
        alpha = alpha.transpose(1, 2).unsqueeze(-1)            # [B,H,T,1]
        log_alpha = torch.log(alpha.clamp(min=1e-6))

        # ══════════════════════════════════════════════════════════
        # Inference path (RNN mode): O(1) per token per layer
        # ══════════════════════════════════════════════════════════
        # When generating, T = 1. We apply the monoid operator once to fold
        # the new token into the accumulated state. This is where "O(1)
        # inference" materializes:
        #     S_t = α_t · S_{t-1} + k_t ⊗ v_t   (one monoid_op call)
        #     o_t = q_t · S_t                   (one matmul)
        # Total: O(H·d²) per layer — independent of sequence length.
        if use_cache and T == 1:
            # Outer product: k_t ⊗ v_t ∈ ℝ^{H×d×d}
            kv_t = torch.einsum('bhd, bhe -> bhde', k[:, :, 0], v[:, :, 0])
            log_t = log_alpha[:, :, 0]  # [B,H,1]

            prev = monoid_cache.get_state(self.layer_idx) if monoid_cache else None
            if prev is None:
                # First token: initialize from the learnable h0.
                decay_t = torch.exp(log_t)
                while decay_t.dim() < self.h0.dim():
                    decay_t = decay_t.unsqueeze(-1)
                new_state = (log_t, self.h0.expand(B, -1, -1, -1) * decay_t + kv_t)
            else:
                # Subsequent tokens: fold via monoid_op — O(1)!
                new_state = monoid_op(prev, (log_t, kv_t))

            if monoid_cache is not None:
                monoid_cache.update(self.layer_idx, new_state)

            # Readout: o_t = q_t · S_t
            o = torch.einsum('bhd, bhde -> bhe', q[:, :, 0], new_state[1])
            # Reshape [B,H,d] → [B,1,H*d] (heads contiguous, matching the scan path)
            o = o.contiguous().view(B, 1, -1)
            return self.o_proj(o), new_state

        # ══════════════════════════════════════════════════════════
        # Training path (parallel scan): O(T) via prefix sum
        # ══════════════════════════════════════════════════════════
        # For a full sequence of length T, compute ALL prefix states
        # S_1, S_2, ..., S_T simultaneously with a parallel prefix scan.
        # Complexity: O(T) work, O(log T) depth — GPU-friendly.

        # Batched outer products: kv_t = k_t ⊗ v_t for all t
        kv = torch.einsum('bhtd, bhte -> bhtde', k, v)  # [B,H,T,d,d]

        if use_cache:
            # Prefill with state extraction (for switching to RNN inference)
            states, final_state = parallel_scan_with_state(log_alpha, kv)
        else:
            # Pure training: no state needed
            states = parallel_scan(log_alpha, kv)
            final_state = None

        # ── Incorporate h0: make training consistent with inference ──
        # parallel_scan starts from S_0 = 0, but inference starts from S_0 = h0.
        # Fix: S_t(with h0) = h0 · Π_{i=1}^{t} α_i + S_t(from scan), where the
        # cumulative decay is Π_{i=1}^{t} α_i = exp(Σ_{i=1}^{t} log α_i).
        cum_log_decay = torch.cumsum(log_alpha.squeeze(-1), dim=2)        # [B,H,T]
        cum_decay = torch.exp(cum_log_decay).unsqueeze(-1).unsqueeze(-1)  # [B,H,T,1,1]
        states = states + self.h0.unsqueeze(2) * cum_decay                # [B,H,T,d,d]

        if use_cache:
            # Update final_state to include the h0 contribution.
            final_state = (final_state[0], states[:, :, -1])
            if monoid_cache is not None:
                monoid_cache.update(self.layer_idx, final_state)

        # Readout: o_t = q_t · S_t for all t simultaneously
        o = torch.einsum('bhtd, bhtde -> bhte', q, states)
        o = o.transpose(1, 2).contiguous().view(B, T, -1)
        return self.o_proj(o), final_state


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Decoder layer: MonoidAttention + LlamaMLP + LlamaRMSNorm
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class MonoidDecoderLayer(nn.Module):
    """
    Pre-norm Transformer block with Monoid attention.

    Data flow:
        x → RMSNorm → MonoidAttn → +residual → RMSNorm → LlamaMLP → +residual → out

    The MLP and RMSNorm are identical to Llama (weights transferred directly);
    only MonoidAttention is the novel component.
    """
    gradient_checkpointing = False

    def __init__(self, config: MonoidConfig, layer_idx: int):
        super().__init__()
        self.self_attn = MonoidAttention(config, layer_idx)
        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: Tensor,
        monoid_cache: MonoidCache | None = None,
        use_cache: bool = False,
    ) -> Tensor:
        # --- Attention block with residual ---
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(hidden_states, monoid_cache=monoid_cache, use_cache=use_cache)
        hidden_states = residual + hidden_states

        # --- FFN block with residual ---
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# MonoidModel (backbone)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class MonoidPreTrainedModel(PreTrainedModel):
    config_class = MonoidConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MonoidDecoderLayer"]

    def _init_weights(self, module: nn.Module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


class MonoidModel(MonoidPreTrainedModel):
    """
    Stack of MonoidDecoderLayers with token embedding and final norm.

    Forward: embed_tokens → N × MonoidDecoderLayer → final_norm
    """

    def __init__(self, config: MonoidConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MonoidDecoderLayer(config, i) for i in range(config.num_hidden_layers)]
        )
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        self.post_init()

    def forward(
        self,
        input_ids: Tensor | None = None,
        inputs_embeds: Tensor | None = None,
        monoid_cache: MonoidCache | None = None,
        use_cache: bool = False,
    ) -> BaseModelOutputWithPast:
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        hidden_states = inputs_embeds
        for layer in self.layers:
            hidden_states = layer(hidden_states, monoid_cache=monoid_cache, use_cache=use_cache)

        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=monoid_cache,
        )


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# MonoidForCausalLM — the full causal LM
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

class MonoidForCausalLM(MonoidPreTrainedModel, GenerationMixin):
    """
    Monoid-based causal language model with an LM head.

    The architecture in one sentence:
        "Llama body + Monoid mind" — reuse Llama's proven MLP/embeddings,
        replace attention with monoid state compression for O(1) inference.
    """
    _tied_weights_keys = ["lm_head.weight"]

    # Tell the HuggingFace GenerationMixin NOT to create a DynamicCache.
    # Monoid uses its own O(1) MonoidCache, not a KV-cache.
    _is_stateful = True

    def __init__(self, config: MonoidConfig):
        super().__init__(config)
        self.model = MonoidModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self,
        input_ids: Tensor,
        past_key_values=None,
        attention_mask: Tensor | None = None,
        inputs_embeds: Tensor | None = None,
        **kwargs,
    ) -> dict:
        """
        Called by GenerationMixin at each decoding step.

        HuggingFace may pass a DynamicCache; we intercept and replace it
        with a MonoidCache, since we don't use a standard KV-cache.
        """
        # Intercept non-MonoidCache objects (e.g. a DynamicCache from GenerationMixin)
        if past_key_values is not None and not isinstance(past_key_values, MonoidCache):
            past_key_values = None

        if past_key_values is not None and past_key_values.seen_tokens > 0:
            # A cache exists → feed only the latest token (O(1) inference)
            input_ids = input_ids[:, -1:]

        model_inputs = {
            "input_ids": input_ids,
            "monoid_cache": past_key_values,
            "use_cache": True,
        }
        return model_inputs

    def forward(
        self,
        input_ids: Tensor | None = None,
        attention_mask: Tensor | None = None,   # kept for API compat; monoid ignores this
        position_ids: Tensor | None = None,     # kept for API compat; monoid ignores this
        past_key_values: MonoidCache | None = None,
        inputs_embeds: Tensor | None = None,
        labels: Tensor | None = None,
        use_cache: bool | None = None,
        monoid_cache: MonoidCache | None = None,
        output_attentions: bool | None = None,      # kept for API compat
        output_hidden_states: bool | None = None,   # kept for API compat
        logits_to_keep: int | Tensor = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        # monoid_cache takes priority; fall back to past_key_values for GenerationMixin compat
        cache = monoid_cache or past_key_values

        # Discard any non-MonoidCache (e.g. a DynamicCache injected by GenerationMixin)
        if cache is not None and not isinstance(cache, MonoidCache):
            cache = None

        if use_cache and cache is None:
            cache = MonoidCache()

        outputs = self.model(
            input_ids=input_ids,
            inputs_embeds=inputs_embeds,
            monoid_cache=cache,
            use_cache=bool(use_cache),
        )

        hidden_states = outputs.last_hidden_state

        # Optionally compute logits only for the last K tokens (memory saving)
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) and logits_to_keep > 0 else slice(None)
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        # Standard causal LM loss: shifted cross-entropy
        loss = None
        if labels is not None:
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = nn.functional.cross_entropy(
                shift_logits.view(-1, self.vocab_size),
                shift_labels.view(-1),
                ignore_index=-100,
            )

        if cache is not None:
            cache.seen_tokens += (input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1])

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=cache,
        )


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# AutoModel registration
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

AutoConfig.register("monoid", MonoidConfig)
AutoModelForCausalLM.register(MonoidConfig, MonoidForCausalLM)


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Smoke tests
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

if __name__ == '__main__':
    device = torch.device('mps' if torch.backends.mps.is_available() else 'cpu')
    print(f'Device: {device}')

    config = MonoidConfig(
        vocab_size=49152,
        hidden_size=576,
        intermediate_size=1536,
        num_hidden_layers=30,
        num_attention_heads=9,
        head_dim=64,
        rms_norm_eps=1e-5,
        hidden_act="silu",
        tie_word_embeddings=True,
    )
    model = MonoidForCausalLM(config).to(device)
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f'Parameters: {n_params:,}')

    # -- Training smoke test --
    B, T = 2, 64
    ids = torch.randint(0, config.vocab_size, (B, T), device=device)
    out = model(ids, labels=ids)
    print(f'Train — logits: {out.logits.shape}, loss: {out.loss:.4f}')

    # -- Inference smoke test (manual RNN loop) --
    prompt = torch.randint(0, config.vocab_size, (1, 8), device=device)
    cache = MonoidCache()
    # Prefill
    prefill_out = model(prompt, use_cache=True, monoid_cache=cache)
    print(f'Prefill — logits: {prefill_out.logits.shape}, cache seen: {cache.seen_tokens}')
    # Decode one token
    next_tok = prefill_out.logits[:, -1:].argmax(dim=-1)
    step_out = model(next_tok, use_cache=True, monoid_cache=cache)
    print(f'Decode — logits: {step_out.logits.shape}, cache seen: {cache.seen_tokens}')

    # -- Monoid associativity check --
    print('\nMonoid associativity check:')
    a = (torch.randn(1, 1, 1), torch.randn(1, 1, 4, 4))
    b = (torch.randn(1, 1, 1), torch.randn(1, 1, 4, 4))
    c = (torch.randn(1, 1, 1), torch.randn(1, 1, 4, 4))
    ab_c = monoid_op(monoid_op(a, b), c)
    a_bc = monoid_op(a, monoid_op(b, c))
    err = (ab_c[1] - a_bc[1]).abs().max().item()
    print(f'  |(a⊕b)⊕c - a⊕(b⊕c)| = {err:.2e}')

    print('\nDone.')
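The projection comments in `MonoidAttention.__init__` describe tiling Llama's GQA k/v heads up to the full head count, but the transfer script itself is not part of this upload. A hedged sketch of the tiling those comments imply — `tile_gqa_to_full_heads` is a hypothetical helper, with shapes matching Llama-3.2-1B (32 query heads, 8 KV heads, head_dim 64):

```python
import torch

def tile_gqa_to_full_heads(w_kv: torch.Tensor, num_kv_heads: int, num_heads: int, head_dim: int) -> torch.Tensor:
    """Tile a Llama GQA k/v projection weight [num_kv_heads*head_dim, hidden]
    up to the full-head shape [num_heads*head_dim, hidden] used by MonoidAttention."""
    hidden = w_kv.shape[1]
    w = w_kv.view(num_kv_heads, head_dim, hidden)
    w = w.repeat_interleave(num_heads // num_kv_heads, dim=0)   # repeat each KV head for its query group
    return w.reshape(num_heads * head_dim, hidden)

# Llama-3.2-1B shapes: hidden=2048, 32 query heads, 8 KV heads, head_dim=64
w_kv = torch.randn(8 * 64, 2048)
print(tile_gqa_to_full_heads(w_kv, 8, 32, 64).shape)            # torch.Size([2048, 2048])
```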
README.md
CHANGED

@@ -1,3 +1,254 @@
---
library_name: transformers
license: apache-2.0
language:
- en
tags:
- monoid
- causal-lm
- linear-attention
- state-space
- O(1)-inference
- reasoning
pipeline_tag: text-generation
model-index:
- name: Spartacus-1B-Instruct
  results: []
---

# Spartacus-1B-Instruct — Causal Monoid Language Model

A 1.3B parameter language model that replaces softmax attention with **causal monoid state compression**, achieving **O(1) time per token** and **O(1) memory** at inference — regardless of sequence length.

Fine-tuned for enhanced reasoning with structured chain-of-thought data.

## Monoid Attention — Internal Structure

```
MonoidAttention (per layer, per head)
┌─────────────────────────────────────────────────────────────────────┐
│                                                                     │
│  x_t ∈ R^{2048}                                                     │
│   │                                                                 │
│   ├──> q_proj ──> RMSNorm ──> q_t ∈ R^{d}   (query)                 │
│   │                                                                 │
│   ├──> k_proj ──> RMSNorm ──> SiLU ──> k_t ∈ R^{d}   (key, >= 0)    │
│   │                                                                 │
│   ├──> v_proj ──> v_t ∈ R^{d}   (value)                             │
│   │                                                                 │
│   └──> decay_proj ──> sigmoid ──> alpha_t ∈ (0,1)   (decay gate)    │
│                                                                     │
│        k_t (x) v_t                                                  │
│         │            ┌──────────────────────────────┐               │
│         │            │ State Matrix S_t ∈ R^{d x d} │               │
│         v            │                              │               │
│  S_t = alpha_t * S_{t-1} + k_t (x) v_t              │               │
│         │            │ "Compressed causal history"  │               │
│         │            └──────────────────────────────┘               │
│         v                                                           │
│  o_t = q_t . S_t ──> o_proj ──> output                              │
│                                                                     │
└─────────────────────────────────────────────────────────────────────┘
```

## Monoid State Diagonal — O(1) Compression Contour

The state matrix `S_t` accumulates causal history along its diagonal. Each head maintains an independent `d x d` state that compresses ALL past tokens into a fixed footprint:

```
State Matrix S_t ∈ R^{64 x 64}   (one per head, 32 heads per layer)

      k-dim -->
    0   8   16  24  32  40  48  56  63
  ┌───┬───┬───┬───┬───┬───┬───┬───┐ 0
  │***│** │*  │   │   │   │   │   │   v-dim
  │***│** │*  │.  │   │   │   │   │   |
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 8 |
  │** │***│** │*  │.  │   │   │   │   v
  │*  │***│** │*  │.  │   │   │   │
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 16
  │*  │** │***│** │*  │.  │   │   │
  │.  │*  │***│** │*  │.  │   │   │
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 24
  │   │.  │** │***│** │*  │.  │   │
  │   │   │*  │***│** │*  │.  │   │
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 32
  │   │   │.  │** │***│** │*  │.  │
  │   │   │   │*  │***│** │*  │.  │
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 40
  │   │   │   │.  │** │***│** │*  │
  │   │   │   │   │*  │***│** │*  │
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 48
  │   │   │   │   │.  │** │***│** │
  │   │   │   │   │   │*  │***│** │
  ├───┼───┼───┼───┼───┼───┼───┼───┤ 56
  │   │   │   │   │   │.  │** │***│
  │   │   │   │   │   │   │*  │***│
  └───┴───┴───┴───┴───┴───┴───┴───┘ 63

Legend: *** = high activation (recent tokens, alpha^0 ~ alpha^2)
        **  = medium (alpha^3 ~ alpha^5)
        *   = fading (alpha^6 ~ alpha^10)
        .   = near-zero (alpha^11+, effectively forgotten)
            = zero (never reached or fully decayed)

The diagonal band emerges because S_t = SUM_{i<=t} alpha^{t-i} * k_i (x) v_i.
Recent outer products dominate near the diagonal; older ones decay
exponentially via alpha, creating this characteristic contour.
```

## Key Properties

| Property | Transformer (Llama) | Spartacus (Monoid) |
|---|---|---|
| Inference time per token | O(T) -- scans full KV-cache | **O(1)** -- single state update |
| Inference memory per layer | O(T) -- stores all past K,V | **O(1)** -- fixed d x d state matrix |
| Sequence length extrapolation | Degrades beyond training length | **Unlimited** -- state size is constant |
| Causality | Imposed via attention mask | **Built into the recurrence** |
| Training complexity | O(T^2) | **O(T)** via parallel prefix scan |

## The Monoid Recurrence

Standard attention computes:

```
o_t = sum_{i<=t} softmax(q_t . k_i) v_i    -- requires O(T) KV-cache
```

Monoid attention compresses the entire causal history into a **fixed-size state matrix** S_t per head:

```
S_t = alpha_t * S_{t-1} + k_t (x) v_t    -- explicit causal recurrence
o_t = q_t . S_t                          -- state readout
```

where `alpha_t = sigmoid(decay_proj(x_t))` is a learned, content-dependent decay gate that controls how fast past information fades. A naive reference form of the recurrence is sketched below.

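For concreteness, here is the recurrence as a per-token loop — a minimal sketch (single head, zero initial state, no QK-norm), not the shipped implementation, which computes the same states with a parallel prefix scan:

```python
import torch

def monoid_attention_naive(q, k, v, alpha):
    """q, k, v: [T, d]; alpha: [T], entries in (0,1). Returns o: [T, d]."""
    T, d = q.shape
    S = torch.zeros(d, d)                               # the only "memory": a fixed d x d state
    o = torch.empty(T, d)
    for t in range(T):
        S = alpha[t] * S + torch.outer(k[t], v[t])      # S_t = alpha_t * S_{t-1} + k_t (x) v_t
        o[t] = q[t] @ S                                 # o_t = q_t . S_t
    return o

T, d = 16, 8
q, v = torch.randn(T, d), torch.randn(T, d)
k = torch.nn.functional.silu(torch.randn(T, d))         # non-negative keys, as in the model
alpha = torch.sigmoid(torch.randn(T))                   # content-dependent decay gate
print(monoid_attention_naive(q, k, v, alpha).shape)     # torch.Size([16, 8])
```
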
## Explicit Causal Modeling

Unlike Transformers where causality is a constraint imposed by masking, Spartacus makes causality a **first-class citizen**:

- The decay gate `alpha_t` explicitly controls per-head information retention at every timestep
- The model learns **when to forget** rather than encoding **where tokens are** (no positional encoding needed)
- No attention mask required -- causality is structural, not enforced

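For intuition, an illustrative calculation (not a measured property of the trained model): with a constant gate, token i enters the state at time t with weight alpha^(t-i), so alpha = 0.95 gives a retention half-life of ln(0.5)/ln(0.95) ≈ 13.5 tokens, while alpha = 0.999 stretches it to ≈ 693 tokens. Because `alpha_t` is produced per token and per head, different heads can sit anywhere on this spectrum and move along it depending on content.
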
## Design Choices

- **SiLU-activated keys**: `k = SiLU(k_proj(x))` ensures non-negative keys, making the state matrix `S` positive semi-definite (PSD). This prevents "feature erasure" where one token's contribution cancels another's
- **Log-space decay**: working in log-space `log(alpha)` avoids numerical underflow when `alpha^T -> 0` for long sequences (see the sketch below)
- **Learnable h0**: the initial state `S_0 = h0` is a learnable parameter (zero-initialized), acting as a compressed "system prompt"

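A standalone numerical sketch of why the log-space choice matters (illustrative values, independent of the model code): long cumulative products of per-step decays underflow in floating point, while cumulative sums of their logs stay well-behaved, and the scan only ever exponentiates bounded *relative* decays:

```python
import torch

log_alpha = torch.log(torch.full((2000,), 0.9))

# Direct cumulative product: 0.9**2000 ~ 3e-92 flushes to exactly 0 in float32,
# losing both the value and any gradient through it.
print(torch.cumprod(torch.exp(log_alpha), dim=0)[-1].item())        # 0.0

# Log-space: the accumulated log-decay is an ordinary finite number...
cum_log = torch.cumsum(log_alpha, dim=0)
print(cum_log[-1].item())                                           # ~ -210.7

# ...and the relative decay between positions, which is what the scan
# actually exponentiates, stays O(1).
print(torch.exp(cum_log[99] - cum_log[89]).item())                  # ~ 0.349 = 0.9**10
```
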
## Model Details

| Parameter | Value |
|---|---|
| Model | `NoesisLab/Spartacus-1B-Instruct` |
| Architecture | MonoidForCausalLM |
| Parameters | ~1.34B (tied embeddings) |
| Hidden size | 2048 |
| Intermediate size (MLP) | 8192 |
| Layers | 16 |
| Attention heads | 32 |
| Head dimension | 64 |
| State matrix per head | 64 x 64 = 4096 floats |
| Vocabulary | 128,256 (Llama-3.2 tokenizer) |
| Precision | bfloat16 |

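To make the O(1)-memory claim concrete, arithmetic from the table above: the full recurrent state is 16 layers × 32 heads × (64 × 64) = 2,097,152 values, about 4 MiB in bfloat16, independent of context length. For comparison, a softmax transformer of the same shape would hold 2 × 16 × 32 × 64 = 65,536 K/V values per token (~128 KiB in bfloat16), so its KV-cache would overtake the entire fixed monoid state after just 32 tokens and keep growing from there.
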
## Benchmarks (0-shot)

| Task | Metric | Value | Stderr |
|---|---|---|---|
| ARC-Challenge | acc_norm | 0.3063 | ±0.0135 |
| ARC-Easy | acc | 0.5518 | ±0.0102 |
| HellaSwag | acc_norm | 0.4610 | ±0.0050 |
| PIQA | acc_norm | 0.6915 | ±0.0108 |
| WinoGrande | acc | 0.5225 | ±0.0140 |

### Comparison with ~1B Baselines (acc_norm, 0-shot)

| Task | Spartacus-1B-Instruct | TinyLlama-1.1B | Llama 3.2-1B | Mamba-1.4B | RWKV-6-1.6B |
|---|---|---|---|---|---|
| ARC-C | **0.3063** | 0.3268 | ~0.359 | 0.284 | ~0.301 |
| ARC-E | **0.5518** | 0.5547 | ~0.752 | 0.512 | ~0.530 |
| HellaSwag | **0.4610** | 0.4670 | ~0.546 | 0.435 | ~0.450 |
| PIQA | **0.6915** | 0.7210 | ~0.740 | 0.655 | ~0.670 |
| WinoGrande | **0.5225** | 0.5040 | ~0.592 | 0.510 | ~0.515 |

> Spartacus achieves competitive performance with sub-quadratic models (Mamba, RWKV) while maintaining **O(1) inference time and memory per token**. Scores marked with ~ are approximate community-reported values.

## Training

### Stage 1: General SFT

- **Base weights**: Transferred from Llama-3.2-1B-Instruct (embeddings, MLP, norms)
- **Data**: Capybara + smol-smoltalk (general conversation)
- **Training**: Full-parameter SFT

### Stage 2: Reasoning Enhancement

- **Data mix**: 60% Qwen3-Short-Reasoning + 20% Capybara + 20% smol-smoltalk
- **Steps**: 2,000
- **Learning rate**: 2e-5 (cosine schedule, 50 warmup steps)
- **Batch size**: 8
- **Sequence length**: 2,048
- **Precision**: bfloat16
- **Optimizer**: AdamW (weight decay 0.01, max grad norm 1.0)

The reasoning data uses a structured "Thought + Solution" format to strengthen chain-of-thought capabilities, while the general data prevents catastrophic forgetting.

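The Stage 2 recipe maps directly onto `transformers.TrainingArguments`; a sketch using the hyperparameters listed above (the output path is hypothetical, and data loading and the exact trainer wiring are not specified in this repo):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="spartacus-stage2-sft",   # hypothetical path
    max_steps=2000,
    learning_rate=2e-5,
    lr_scheduler_type="cosine",
    warmup_steps=50,
    per_device_train_batch_size=8,
    bf16=True,
    weight_decay=0.01,                   # AdamW is the Trainer default optimizer
    max_grad_norm=1.0,
)
```
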
## Parallel Scan Implementation

The `monoid_scan_cuda.py` module provides a Triton JIT-compiled parallel prefix scan:

- **Forward**: sequential scan along T, parallelized across B x H x D on GPU via Triton kernels
- **Backward**: reverse-order adjoint scan computes gradients for both the values and the log-decay gates
- **Fallback**: pure PyTorch sequential scan for CPU/MPS
- **Auto-dispatch**: CUDA -> Triton kernel, otherwise -> PyTorch fallback

A reference for the forward semantics is sketched after this list.

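For reference, the forward semantics of `parallel_scan` can be stated as a short sequential loop in pure PyTorch — a sketch of what the kernels compute, not the module's actual fallback code, whose details may differ:

```python
import torch

def parallel_scan_reference(log_alpha, kv):
    """log_alpha: [B, H, T, 1]; kv: [B, H, T, d, d]. Returns all prefix states [B, H, T, d, d]."""
    B, H, T, d, _ = kv.shape
    states = torch.empty_like(kv)
    S = torch.zeros(B, H, d, d, dtype=kv.dtype, device=kv.device)
    for t in range(T):
        decay = torch.exp(log_alpha[:, :, t]).unsqueeze(-1)   # [B, H, 1, 1]
        S = decay * S + kv[:, :, t]                           # S_t = alpha_t * S_{t-1} + k_t (x) v_t
        states[:, :, t] = S
    return states
```
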
## Usage

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "NoesisLab/Spartacus-1B-Instruct",
    trust_remote_code=True,
    torch_dtype="bfloat16",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("NoesisLab/Spartacus-1B-Instruct")

messages = [{"role": "user", "content": "Hello!"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(text, return_tensors="pt").to(model.device)

outputs = model.generate(**inputs, max_new_tokens=512)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

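Because the cache is a fixed-size `MonoidCache` rather than a growing KV-cache, decoding can also be driven by hand, mirroring the smoke test in `MonoidForCausalLM.py` (a greedy-decoding sketch continuing from `model`, `tokenizer`, and `inputs` above; the import assumes the repo files are available locally):

```python
import torch
from MonoidForCausalLM import MonoidCache

cache = MonoidCache()
out = model(**inputs, use_cache=True, monoid_cache=cache)   # prefill: one O(T) scan
tok = out.logits[:, -1:].argmax(dim=-1)

generated = [tok]
for _ in range(64):                                         # each step: O(1) time and memory
    out = model(tok, use_cache=True, monoid_cache=cache)
    tok = out.logits[:, -1:].argmax(dim=-1)
    generated.append(tok)
print(tokenizer.decode(torch.cat(generated, dim=1)[0], skip_special_tokens=True))
```
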
## File Structure

```
MonoidForCausalLM.py   # Model architecture (MonoidConfig, MonoidAttention, MonoidForCausalLM)
monoid_scan_cuda.py    # Triton JIT parallel prefix scan + PyTorch fallback
model.safetensors      # Model weights (bfloat16)
config.json            # Model configuration
tokenizer.json         # Llama-3.2 tokenizer
```

## Citation

```bibtex
@software{spartacus2025,
  title={Spartacus: Causal Monoid Language Model with O(1) Inference},
  author={NoesisLab},
  year={2025},
  url={https://huggingface.co/NoesisLab/Spartacus-1B-Instruct},
  description={Replaces softmax attention with monoid state compression for constant-time, constant-memory autoregressive generation}
}
```

## License

Apache 2.0
chat_template.jinja
ADDED
@@ -0,0 +1,93 @@
{{- bos_token }}
{%- if custom_tools is defined %}
    {%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
    {%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
    {%- if strftime_now is defined %}
        {%- set date_string = strftime_now("%d %b %Y") %}
    {%- else %}
        {%- set date_string = "26 Jul 2024" %}
    {%- endif %}
{%- endif %}
{%- if not tools is defined %}
    {%- set tools = none %}
{%- endif %}

{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
    {%- set system_message = messages[0]['content']|trim %}
    {%- set messages = messages[1:] %}
{%- else %}
    {%- set system_message = "" %}
{%- endif %}

{#- System message #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if tools is not none %}
    {{- "Environment: ipython\n" }}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
    {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
    {{- "Do not use variables.\n\n" }}
    {%- for t in tools %}
        {{- t | tojson(indent=4) }}
        {{- "\n\n" }}
    {%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}

{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
    {#- Extract the first user message so we can plug it in here #}
    {%- if messages | length != 0 %}
        {%- set first_user_message = messages[0]['content']|trim %}
        {%- set messages = messages[1:] %}
    {%- else %}
        {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
    {%- endif %}
    {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
    {{- "Given the following functions, please respond with a JSON for a function call " }}
    {{- "with its proper arguments that best answers the given prompt.\n\n" }}
    {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
    {{- "Do not use variables.\n\n" }}
    {%- for t in tools %}
        {{- t | tojson(indent=4) }}
        {{- "\n\n" }}
    {%- endfor %}
    {{- first_user_message + "<|eot_id|>"}}
{%- endif %}

{%- for message in messages %}
    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
    {%- elif 'tool_calls' in message %}
        {%- if not message.tool_calls|length == 1 %}
            {{- raise_exception("This model only supports single tool-calls at once!") }}
        {%- endif %}
        {%- set tool_call = message.tool_calls[0].function %}
        {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
        {{- '{"name": "' + tool_call.name + '", ' }}
        {{- '"parameters": ' }}
        {{- tool_call.arguments | tojson }}
        {{- "}" }}
        {{- "<|eot_id|>" }}
    {%- elif message.role == "tool" or message.role == "ipython" %}
        {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
        {%- if message.content is mapping or message.content is iterable %}
            {{- message.content | tojson }}
        {%- else %}
            {{- message.content }}
        {%- endif %}
        {{- "<|eot_id|>" }}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
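The template above is what `tokenizer.apply_chat_template` renders for this model. A minimal sketch, assuming the repo id from the citation URL; the messages are illustrative:

```python
from transformers import AutoTokenizer

# Hypothetical usage: render the chat template shipped in this repo.
tok = AutoTokenizer.from_pretrained("NoesisLab/Spartacus-1B-Instruct")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is a monoid?"},
]
# add_generation_prompt=True triggers the template's final block, which
# appends '<|start_header_id|>assistant<|end_header_id|>\n\n'.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```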
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "architectures": [
    "MonoidForCausalLM"
  ],
  "attention_bias": false,
  "auto_map": {
    "AutoConfig": "MonoidForCausalLM.MonoidConfig",
    "AutoModelForCausalLM": "MonoidForCausalLM.MonoidForCausalLM"
  },
  "bos_token_id": 128000,
  "dtype": "bfloat16",
  "eos_token_id": 128009,
  "head_dim": 64,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.041666666666666664,
  "intermediate_size": 8192,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "monoid",
  "num_attention_heads": 32,
  "num_hidden_layers": 16,
  "pad_token_id": 128009,
  "rms_norm_eps": 1e-05,
  "transformers_version": "4.57.6",
  "vocab_size": 128256
}
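The `auto_map` block routes the `Auto*` classes to the custom code bundled in `MonoidForCausalLM.py`, so loading requires `trust_remote_code=True`. A minimal sketch, assuming the repo id from the citation URL:

```python
from transformers import AutoConfig, AutoModelForCausalLM

# auto_map resolves:
#   AutoConfig           -> MonoidForCausalLM.MonoidConfig
#   AutoModelForCausalLM -> MonoidForCausalLM.MonoidForCausalLM
# so the repo's custom code must be explicitly trusted.
config = AutoConfig.from_pretrained(
    "NoesisLab/Spartacus-1B-Instruct", trust_remote_code=True
)
print(config.model_type, config.num_hidden_layers, config.head_dim)  # monoid 16 64

model = AutoModelForCausalLM.from_pretrained(
    "NoesisLab/Spartacus-1B-Instruct", trust_remote_code=True
)
```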
generation_config.json
ADDED
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 128000,
  "eos_token_id": [
    128009
  ],
  "pad_token_id": 128009,
  "transformers_version": "4.57.6"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a32bdaef8ff9aa5cba602faa280ab5d3526515a6abd97d411c3448f9b9ebcdc7
size 2679277744
monoid_scan_cuda.py
ADDED
@@ -0,0 +1,411 @@
"""
monoid_scan_cuda.py — Triton CUDA JIT Accelerated Parallel Prefix Scan

This module implements the parallel prefix scan for the monoid recurrence:
    y_t = exp(log_decay_t) · y_{t-1} + x_t

This is the computational backbone of Monoid Attention's state compression.

Why the parallel prefix scan matters:
    The monoid recurrence S_t = α_t·S_{t-1} + kv_t is inherently sequential.
    However, because (log_α, S) ⊕ (log_β, X) = (log_α+log_β, exp(log_β)·S+X)
    is ASSOCIATIVE, we can compute all prefix sums S_1..S_T via a parallel
    reduction tree in O(log T) depth instead of O(T) sequential steps.

Training uses the O(T) parallel scan (this file).
Inference uses the O(1) sequential monoid_op (in MonoidForCausalLM.py).

Implementation:
    Forward:  sequential scan along T, parallelized across B*H*D on GPU.
    Backward: reverse-order adjoint scan for gradient computation.
    Auto-dispatch: CUDA → Triton kernel, CPU/MPS → PyTorch fallback.
"""

from __future__ import annotations

import torch
from torch import Tensor
from torch.autograd import Function
from typing import Tuple

try:
    import triton
    import triton.language as tl
    HAS_TRITON = True
except ImportError:
    HAS_TRITON = False


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Fallback: pure PyTorch sequential scan (CPU / MPS / no Triton)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

def _sequential_scan(log_decays: Tensor, values: Tensor) -> Tensor:
    """
    Pure PyTorch sequential scan fallback (used when CUDA / Triton is unavailable).

    Implements the monoid recurrence step by step:
        acc_0 = 0
        acc_t = exp(log_decay_t) · acc_{t-1} + values_t
    This is O(T) sequential — correct but slow on GPU.

    Args:
        log_decays: [B, H, T, 1] — log of per-head per-step decay gates
        values:     [B, H, T, D_k, D_v] — outer products k_t⊗v_t to accumulate
    Returns:
        output:     [B, H, T, D_k, D_v] — all prefix states S_1, ..., S_T
    """
    B, H, T, D_k, D_v = values.shape
    out = torch.empty_like(values)
    # acc represents S_t — the compressed causal state at time t
    acc = torch.zeros(B, H, D_k, D_v, device=values.device, dtype=values.dtype)
    for t in range(T):
        # S_t = α_t · S_{t-1} + kv_t (the core monoid recurrence)
        decay_t = torch.exp(log_decays[:, :, t]).unsqueeze(-1)  # [B,H,1,1]
        acc = acc * decay_t + values[:, :, t]
        out[:, :, t] = acc
    return out


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Triton Kernels — GPU-accelerated scan
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

if HAS_TRITON:

    @triton.jit
    def _scan_fwd_kernel(
        LD_ptr, V_ptr, O_ptr,
        T, D,
        s_ld_bh, s_ld_t,
        s_v_bh, s_v_t, s_v_d,
        s_o_bh, s_o_t, s_o_d,
        BLOCK_D: tl.constexpr,
    ):
        """
        Forward scan kernel — computes all prefix states S_1..S_T.

        Parallelization strategy:
          - program_id(0) = bh: one program per (batch, head) pair
          - program_id(1) = db: one program per D-dimension block
          - Sequential loop over T (the causal recurrence is inherently sequential)

        Each program computes acc_t = exp(ld_t) * acc_{t-1} + val_t
        for a BLOCK_D-wide slice of the flattened d_k*d_v state matrix.

        Note: while the T-loop is sequential within each program,
        B*H*ceil(D/BLOCK_D) programs run in parallel on the GPU.
        """
        bh = tl.program_id(0)
        db = tl.program_id(1)
        d_offs = db * BLOCK_D + tl.arange(0, BLOCK_D)
        d_mask = d_offs < D

        # acc = S_0 = 0 (identity element of the monoid)
        acc = tl.zeros([BLOCK_D], dtype=tl.float32)

        ld_base = LD_ptr + bh * s_ld_bh
        v_base = V_ptr + bh * s_v_bh
        o_base = O_ptr + bh * s_o_bh

        for t in range(T):
            # Load log_decay and compute decay = exp(log_α_t)
            ld_val = tl.load(ld_base + t * s_ld_t).to(tl.float32)
            decay = tl.exp(ld_val)

            # Load kv_t (a slice of the outer product k_t⊗v_t)
            val = tl.load(
                v_base + t * s_v_t + d_offs * s_v_d,
                mask=d_mask, other=0.0,
            ).to(tl.float32)

            # Core recurrence: S_t = α_t · S_{t-1} + kv_t
            acc = acc * decay + val

            # Store S_t
            tl.store(
                o_base + t * s_o_t + d_offs * s_o_d,
                acc, mask=d_mask,
            )

    @triton.jit
    def _scan_bwd_kernel(
        LD_ptr, O_ptr, GO_ptr, GV_ptr, GLD_ptr,
        T, D,
        s_ld_bh, s_ld_t,
        s_o_bh, s_o_t, s_o_d,
        s_go_bh, s_go_t, s_go_d,
        s_gv_bh, s_gv_t, s_gv_d,
        s_gld_bh, s_gld_t,
        BLOCK_D: tl.constexpr,
    ):
        """
        Backward scan kernel — computes gradients via the adjoint method.

        The forward recurrence is: y_t = a_t * y_{t-1} + x_t

        The adjoint (reverse-time) recurrence for the Lagrange multiplier λ:
            λ_t = ∂L/∂y_t + a_{t+1} · λ_{t+1}   (backward in time)

        Gradients:
            ∂L/∂x_t     = λ_t                        (gradient w.r.t. input values)
            ∂L/∂log_a_t = a_t · Σ_D(λ_t · y_{t-1})   (gradient w.r.t. log-decay)

        The gradient of log_decay is critical for training the decay gate:
        it tells the model how to adjust each head's forgetting rate.
        """
        bh = tl.program_id(0)
        db = tl.program_id(1)
        d_offs = db * BLOCK_D + tl.arange(0, BLOCK_D)
        d_mask = d_offs < D

        # adj holds a_{t+1} · λ_{t+1}, initialized to 0 at the sequence end
        adj = tl.zeros([BLOCK_D], dtype=tl.float32)

        for t_rev in range(T):
            t = T - 1 - t_rev  # reverse time

            # Load ∂L/∂y_t (upstream gradient)
            go = tl.load(
                GO_ptr + bh * s_go_bh + t * s_go_t + d_offs * s_go_d,
                mask=d_mask, other=0.0,
            ).to(tl.float32)

            # Adjoint: λ_t = ∂L/∂y_t + a_{t+1} · λ_{t+1}
            lam = go + adj

            # ∂L/∂x_t = λ_t (gradient of values)
            tl.store(
                GV_ptr + bh * s_gv_bh + t * s_gv_t + d_offs * s_gv_d,
                lam, mask=d_mask,
            )

            # ∂L/∂log_a_t = a_t · Σ_D(λ_t · y_{t-1})
            # This gradient flows back to the decay gate (decay_proj),
            # teaching the model how to control causal information retention.
            ld_val = tl.load(LD_ptr + bh * s_ld_bh + t * s_ld_t).to(tl.float32)
            a_t = tl.exp(ld_val)

            if t > 0:
                y_prev = tl.load(
                    O_ptr + bh * s_o_bh + (t - 1) * s_o_t + d_offs * s_o_d,
                    mask=d_mask, other=0.0,
                ).to(tl.float32)
                grad_ld_partial = tl.sum(lam * y_prev) * a_t
                tl.atomic_add(GLD_ptr + bh * s_gld_bh + t * s_gld_t, grad_ld_partial)

            # Prepare for the next step (t-1): adj = a_t · λ_t
            adj = a_t * lam

    # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
    # Autograd Function — bridges Triton kernels with PyTorch autograd
    # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

    class _ParallelScanFn(Function):
        """
        Custom autograd function for the parallel prefix scan.

        Forward:  launches _scan_fwd_kernel to compute all prefix states.
        Backward: launches _scan_bwd_kernel to compute gradients via the adjoint method.
        """
        @staticmethod
        def forward(ctx, log_decays: Tensor, values: Tensor) -> Tensor:
            B, H, T, D_k, D_v = values.shape
            D = D_k * D_v  # flattened state dimension

            # Flatten: [B,H,T,1] → [BH, T], [B,H,T,Dk,Dv] → [BH, T, D]
            ld_flat = log_decays.squeeze(-1).contiguous().reshape(B * H, T)
            v_flat = values.reshape(B * H, T, D).contiguous()
            o_flat = torch.empty_like(v_flat)

            BH = B * H
            BLOCK_D = min(triton.next_power_of_2(D), 1024)
            # Grid: (BH, ceil(D/BLOCK_D)) — one program per (batch*head, D-block)
            grid = (BH, triton.cdiv(D, BLOCK_D))

            _scan_fwd_kernel[grid](
                ld_flat, v_flat, o_flat,
                T, D,
                ld_flat.stride(0), ld_flat.stride(1),
                v_flat.stride(0), v_flat.stride(1), v_flat.stride(2),
                o_flat.stride(0), o_flat.stride(1), o_flat.stride(2),
                BLOCK_D=BLOCK_D,
            )

            # Save for backward: need log_decays and the forward outputs y_t
            ctx.save_for_backward(ld_flat, o_flat)
            ctx.shape_info = (B, H, T, D_k, D_v, D, BH, BLOCK_D)
            return o_flat.reshape(B, H, T, D_k, D_v)

        @staticmethod
        def backward(ctx, grad_output: Tensor):
            ld_flat, o_flat = ctx.saved_tensors
            B, H, T, D_k, D_v, D, BH, BLOCK_D = ctx.shape_info

            go_flat = grad_output.reshape(BH, T, D).contiguous()
            gv_flat = torch.empty_like(go_flat)
            # Use f32 for atomic_add precision in gradient accumulation
            gld_flat = torch.zeros(BH, T, device=ld_flat.device, dtype=torch.float32)

            grid = (BH, triton.cdiv(D, BLOCK_D))

            _scan_bwd_kernel[grid](
                ld_flat, o_flat, go_flat, gv_flat, gld_flat,
                T, D,
                ld_flat.stride(0), ld_flat.stride(1),
                o_flat.stride(0), o_flat.stride(1), o_flat.stride(2),
                go_flat.stride(0), go_flat.stride(1), go_flat.stride(2),
                gv_flat.stride(0), gv_flat.stride(1), gv_flat.stride(2),
                gld_flat.stride(0), gld_flat.stride(1),
                BLOCK_D=BLOCK_D,
            )

            grad_log_decays = gld_flat.to(grad_output.dtype).reshape(B, H, T, 1)
            grad_values = gv_flat.reshape(B, H, T, D_k, D_v)
            return grad_log_decays, grad_values

    def _triton_parallel_scan(log_decays: Tensor, values: Tensor) -> Tensor:
        """Triton-accelerated parallel scan entry point."""
        return _ParallelScanFn.apply(log_decays, values)

else:
    _triton_parallel_scan = None


# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Public API
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

def parallel_scan(log_decays: Tensor, values: Tensor) -> Tensor:
    """
    Parallel prefix scan — computes all prefix monoid sums.

    This is the training-time workhorse of Monoid Attention.
    It computes S_1, S_2, ..., S_T, where S_t = α_t·S_{t-1} + kv_t,
    for ALL timesteps simultaneously.

    Auto-dispatches based on device:
        CUDA    → Triton JIT kernel (fast, with custom backward)
        CPU/MPS → PyTorch sequential scan (correct, slower)

    Args:
        log_decays: [B, H, T, 1] — log of decay gates α_t
        values:     [B, H, T, D_k, D_v] — outer products k_t⊗v_t
    Returns:
        states:     [B, H, T, D_k, D_v] — all prefix states S_1..S_T
    """
    if _triton_parallel_scan is not None and values.is_cuda:
        return _triton_parallel_scan(log_decays, values)
    return _sequential_scan(log_decays, values)


def parallel_scan_with_state(
    log_decays: Tensor, values: Tensor,
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
    """
    Parallel prefix scan + extraction of the final state for inference handoff.

    Used during prefill: computes all training-time prefix states AND extracts
    the final accumulated state S_T, so that subsequent tokens can be generated
    in O(1) RNN mode via monoid_op.

    This is the bridge between training mode (parallel scan)
    and inference mode (sequential monoid_op).

    Args:
        log_decays: [B, H, T, 1]
        values:     [B, H, T, D_k, D_v]

    Returns:
        output: [B, H, T, D_k, D_v] — all prefix states S_1..S_T
        final_state: (log_acc, S_T) where
            log_acc:     [B, H, 1] — accumulated log-decay (for future monoid_op)
            final_state: [B, H, D_k, D_v] — S_T, the compressed causal summary
    """
    output = parallel_scan(log_decays, values)
    # Sum all log-decays to get the total accumulated decay
    log_acc = log_decays.squeeze(-1).sum(dim=2, keepdim=True)  # [B, H, 1]
    # The last timestep's state IS the full causal summary
    final_state = output[:, :, -1]  # [B, H, D_k, D_v]
    return output, (log_acc, final_state)
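A quick consistency check for the scan above (not part of the upload): on CPU, `parallel_scan` takes the pure-PyTorch path, so this sketch only verifies that the public API reproduces the recurrence S_t = α_t·S_{t-1} + kv_t and that `parallel_scan_with_state` hands back S_T plus the summed log-decay. The shapes are illustrative.

```python
import torch
from monoid_scan_cuda import parallel_scan_with_state

B, H, T, Dk, Dv = 2, 3, 5, 4, 4
torch.manual_seed(0)
log_decays = -torch.rand(B, H, T, 1)    # log α_t ≤ 0, so α_t ∈ (0, 1]
values = torch.randn(B, H, T, Dk, Dv)   # stand-ins for the k⊗v outer products

# Reference: unroll S_t = α_t · S_{t-1} + kv_t by hand.
S, ref = torch.zeros(B, H, Dk, Dv), []
for t in range(T):
    S = torch.exp(log_decays[:, :, t]).unsqueeze(-1) * S + values[:, :, t]
    ref.append(S)
ref = torch.stack(ref, dim=2)           # [B, H, T, Dk, Dv]

out, (log_acc, final_state) = parallel_scan_with_state(log_decays, values)
assert torch.allclose(out, ref, atol=1e-5)
assert torch.allclose(final_state, ref[:, :, -1])
assert torch.allclose(log_acc, log_decays.squeeze(-1).sum(dim=2, keepdim=True))
```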
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|eot_id|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
size 17209920
tokenizer_config.json
ADDED
@@ -0,0 +1,2063 @@
{
  "added_tokens_decoder": {
    "128000": {"content": "<|begin_of_text|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128001": {"content": "<|end_of_text|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128002": {"content": "<|reserved_special_token_0|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128003": {"content": "<|reserved_special_token_1|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128004": {"content": "<|finetune_right_pad_id|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128005": {"content": "<|reserved_special_token_2|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128006": {"content": "<|start_header_id|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128007": {"content": "<|end_header_id|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128008": {"content": "<|eom_id|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128009": {"content": "<|eot_id|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128010": {"content": "<|python_tag|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128011": {"content": "<|reserved_special_token_3|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128012": {"content": "<|reserved_special_token_4|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128013": {"content": "<|reserved_special_token_5|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128014": {"content": "<|reserved_special_token_6|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128015": {"content": "<|reserved_special_token_7|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128016": {"content": "<|reserved_special_token_8|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128017": {"content": "<|reserved_special_token_9|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128018": {"content": "<|reserved_special_token_10|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128019": {"content": "<|reserved_special_token_11|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128020": {"content": "<|reserved_special_token_12|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128021": {"content": "<|reserved_special_token_13|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128022": {"content": "<|reserved_special_token_14|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128023": {"content": "<|reserved_special_token_15|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128024": {"content": "<|reserved_special_token_16|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128025": {"content": "<|reserved_special_token_17|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128026": {"content": "<|reserved_special_token_18|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128027": {"content": "<|reserved_special_token_19|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128028": {"content": "<|reserved_special_token_20|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128029": {"content": "<|reserved_special_token_21|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128030": {"content": "<|reserved_special_token_22|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128031": {"content": "<|reserved_special_token_23|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128032": {"content": "<|reserved_special_token_24|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128033": {"content": "<|reserved_special_token_25|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128034": {"content": "<|reserved_special_token_26|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128035": {"content": "<|reserved_special_token_27|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128036": {"content": "<|reserved_special_token_28|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128037": {"content": "<|reserved_special_token_29|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128038": {"content": "<|reserved_special_token_30|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128039": {"content": "<|reserved_special_token_31|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128040": {"content": "<|reserved_special_token_32|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128041": {"content": "<|reserved_special_token_33|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128042": {"content": "<|reserved_special_token_34|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128043": {"content": "<|reserved_special_token_35|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128044": {"content": "<|reserved_special_token_36|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128045": {"content": "<|reserved_special_token_37|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128046": {"content": "<|reserved_special_token_38|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128047": {"content": "<|reserved_special_token_39|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128048": {"content": "<|reserved_special_token_40|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128049": {"content": "<|reserved_special_token_41|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128050": {"content": "<|reserved_special_token_42|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128051": {"content": "<|reserved_special_token_43|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128052": {"content": "<|reserved_special_token_44|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128053": {"content": "<|reserved_special_token_45|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128054": {"content": "<|reserved_special_token_46|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128055": {"content": "<|reserved_special_token_47|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128056": {"content": "<|reserved_special_token_48|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128057": {"content": "<|reserved_special_token_49|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128058": {"content": "<|reserved_special_token_50|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128059": {"content": "<|reserved_special_token_51|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128060": {"content": "<|reserved_special_token_52|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128061": {"content": "<|reserved_special_token_53|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128062": {"content": "<|reserved_special_token_54|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128063": {"content": "<|reserved_special_token_55|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128064": {"content": "<|reserved_special_token_56|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128065": {"content": "<|reserved_special_token_57|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128066": {"content": "<|reserved_special_token_58|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128067": {"content": "<|reserved_special_token_59|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128068": {"content": "<|reserved_special_token_60|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128069": {"content": "<|reserved_special_token_61|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128070": {"content": "<|reserved_special_token_62|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128071": {"content": "<|reserved_special_token_63|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128072": {"content": "<|reserved_special_token_64|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128073": {"content": "<|reserved_special_token_65|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128074": {"content": "<|reserved_special_token_66|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128075": {"content": "<|reserved_special_token_67|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128076": {"content": "<|reserved_special_token_68|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128077": {"content": "<|reserved_special_token_69|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128078": {"content": "<|reserved_special_token_70|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128079": {"content": "<|reserved_special_token_71|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128080": {"content": "<|reserved_special_token_72|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128081": {"content": "<|reserved_special_token_73|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128082": {"content": "<|reserved_special_token_74|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128083": {"content": "<|reserved_special_token_75|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128084": {"content": "<|reserved_special_token_76|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128085": {"content": "<|reserved_special_token_77|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128086": {"content": "<|reserved_special_token_78|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128087": {"content": "<|reserved_special_token_79|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128088": {"content": "<|reserved_special_token_80|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128089": {"content": "<|reserved_special_token_81|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128090": {"content": "<|reserved_special_token_82|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128091": {"content": "<|reserved_special_token_83|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128092": {"content": "<|reserved_special_token_84|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "128093": {"content": "<|reserved_special_token_85|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
+
"normalized": false,
|
| 751 |
+
"rstrip": false,
|
| 752 |
+
"single_word": false,
|
| 753 |
+
"special": true
|
| 754 |
+
},
|
| 755 |
+
"128094": {
|
| 756 |
+
"content": "<|reserved_special_token_86|>",
|
| 757 |
+
"lstrip": false,
|
| 758 |
+
"normalized": false,
|
| 759 |
+
"rstrip": false,
|
| 760 |
+
"single_word": false,
|
| 761 |
+
"special": true
|
| 762 |
+
},
|
| 763 |
+
"128095": {
|
| 764 |
+
"content": "<|reserved_special_token_87|>",
|
| 765 |
+
"lstrip": false,
|
| 766 |
+
"normalized": false,
|
| 767 |
+
"rstrip": false,
|
| 768 |
+
"single_word": false,
|
| 769 |
+
"special": true
|
| 770 |
+
},
|
| 771 |
+
"128096": {
|
| 772 |
+
"content": "<|reserved_special_token_88|>",
|
| 773 |
+
"lstrip": false,
|
| 774 |
+
"normalized": false,
|
| 775 |
+
"rstrip": false,
|
| 776 |
+
"single_word": false,
|
| 777 |
+
"special": true
|
| 778 |
+
},
|
| 779 |
+
"128097": {
|
| 780 |
+
"content": "<|reserved_special_token_89|>",
|
| 781 |
+
"lstrip": false,
|
| 782 |
+
"normalized": false,
|
| 783 |
+
"rstrip": false,
|
| 784 |
+
"single_word": false,
|
| 785 |
+
"special": true
|
| 786 |
+
},
|
| 787 |
+
"128098": {
|
| 788 |
+
"content": "<|reserved_special_token_90|>",
|
| 789 |
+
"lstrip": false,
|
| 790 |
+
"normalized": false,
|
| 791 |
+
"rstrip": false,
|
| 792 |
+
"single_word": false,
|
| 793 |
+
"special": true
|
| 794 |
+
},
|
| 795 |
+
"128099": {
|
| 796 |
+
"content": "<|reserved_special_token_91|>",
|
| 797 |
+
"lstrip": false,
|
| 798 |
+
"normalized": false,
|
| 799 |
+
"rstrip": false,
|
| 800 |
+
"single_word": false,
|
| 801 |
+
"special": true
|
| 802 |
+
},
|
| 803 |
+
"128100": {
|
| 804 |
+
"content": "<|reserved_special_token_92|>",
|
| 805 |
+
"lstrip": false,
|
| 806 |
+
"normalized": false,
|
| 807 |
+
"rstrip": false,
|
| 808 |
+
"single_word": false,
|
| 809 |
+
"special": true
|
| 810 |
+
},
|
| 811 |
+
"128101": {
|
| 812 |
+
"content": "<|reserved_special_token_93|>",
|
| 813 |
+
"lstrip": false,
|
| 814 |
+
"normalized": false,
|
| 815 |
+
"rstrip": false,
|
| 816 |
+
"single_word": false,
|
| 817 |
+
"special": true
|
| 818 |
+
},
|
| 819 |
+
"128102": {
|
| 820 |
+
"content": "<|reserved_special_token_94|>",
|
| 821 |
+
"lstrip": false,
|
| 822 |
+
"normalized": false,
|
| 823 |
+
"rstrip": false,
|
| 824 |
+
"single_word": false,
|
| 825 |
+
"special": true
|
| 826 |
+
},
|
| 827 |
+
"128103": {
|
| 828 |
+
"content": "<|reserved_special_token_95|>",
|
| 829 |
+
"lstrip": false,
|
| 830 |
+
"normalized": false,
|
| 831 |
+
"rstrip": false,
|
| 832 |
+
"single_word": false,
|
| 833 |
+
"special": true
|
| 834 |
+
},
|
| 835 |
+
"128104": {
|
| 836 |
+
"content": "<|reserved_special_token_96|>",
|
| 837 |
+
"lstrip": false,
|
| 838 |
+
"normalized": false,
|
| 839 |
+
"rstrip": false,
|
| 840 |
+
"single_word": false,
|
| 841 |
+
"special": true
|
| 842 |
+
},
|
| 843 |
+
"128105": {
|
| 844 |
+
"content": "<|reserved_special_token_97|>",
|
| 845 |
+
"lstrip": false,
|
| 846 |
+
"normalized": false,
|
| 847 |
+
"rstrip": false,
|
| 848 |
+
"single_word": false,
|
| 849 |
+
"special": true
|
| 850 |
+
},
|
| 851 |
+
"128106": {
|
| 852 |
+
"content": "<|reserved_special_token_98|>",
|
| 853 |
+
"lstrip": false,
|
| 854 |
+
"normalized": false,
|
| 855 |
+
"rstrip": false,
|
| 856 |
+
"single_word": false,
|
| 857 |
+
"special": true
|
| 858 |
+
},
|
| 859 |
+
"128107": {
|
| 860 |
+
"content": "<|reserved_special_token_99|>",
|
| 861 |
+
"lstrip": false,
|
| 862 |
+
"normalized": false,
|
| 863 |
+
"rstrip": false,
|
| 864 |
+
"single_word": false,
|
| 865 |
+
"special": true
|
| 866 |
+
},
|
| 867 |
+
"128108": {
|
| 868 |
+
"content": "<|reserved_special_token_100|>",
|
| 869 |
+
"lstrip": false,
|
| 870 |
+
"normalized": false,
|
| 871 |
+
"rstrip": false,
|
| 872 |
+
"single_word": false,
|
| 873 |
+
"special": true
|
| 874 |
+
},
|
| 875 |
+
"128109": {
|
| 876 |
+
"content": "<|reserved_special_token_101|>",
|
| 877 |
+
"lstrip": false,
|
| 878 |
+
"normalized": false,
|
| 879 |
+
"rstrip": false,
|
| 880 |
+
"single_word": false,
|
| 881 |
+
"special": true
|
| 882 |
+
},
|
| 883 |
+
"128110": {
|
| 884 |
+
"content": "<|reserved_special_token_102|>",
|
| 885 |
+
"lstrip": false,
|
| 886 |
+
"normalized": false,
|
| 887 |
+
"rstrip": false,
|
| 888 |
+
"single_word": false,
|
| 889 |
+
"special": true
|
| 890 |
+
},
|
| 891 |
+
"128111": {
|
| 892 |
+
"content": "<|reserved_special_token_103|>",
|
| 893 |
+
"lstrip": false,
|
| 894 |
+
"normalized": false,
|
| 895 |
+
"rstrip": false,
|
| 896 |
+
"single_word": false,
|
| 897 |
+
"special": true
|
| 898 |
+
},
|
| 899 |
+
"128112": {
|
| 900 |
+
"content": "<|reserved_special_token_104|>",
|
| 901 |
+
"lstrip": false,
|
| 902 |
+
"normalized": false,
|
| 903 |
+
"rstrip": false,
|
| 904 |
+
"single_word": false,
|
| 905 |
+
"special": true
|
| 906 |
+
},
|
| 907 |
+
"128113": {
|
| 908 |
+
"content": "<|reserved_special_token_105|>",
|
| 909 |
+
"lstrip": false,
|
| 910 |
+
"normalized": false,
|
| 911 |
+
"rstrip": false,
|
| 912 |
+
"single_word": false,
|
| 913 |
+
"special": true
|
| 914 |
+
},
|
| 915 |
+
"128114": {
|
| 916 |
+
"content": "<|reserved_special_token_106|>",
|
| 917 |
+
"lstrip": false,
|
| 918 |
+
"normalized": false,
|
| 919 |
+
"rstrip": false,
|
| 920 |
+
"single_word": false,
|
| 921 |
+
"special": true
|
| 922 |
+
},
|
| 923 |
+
"128115": {
|
| 924 |
+
"content": "<|reserved_special_token_107|>",
|
| 925 |
+
"lstrip": false,
|
| 926 |
+
"normalized": false,
|
| 927 |
+
"rstrip": false,
|
| 928 |
+
"single_word": false,
|
| 929 |
+
"special": true
|
| 930 |
+
},
|
| 931 |
+
"128116": {
|
| 932 |
+
"content": "<|reserved_special_token_108|>",
|
| 933 |
+
"lstrip": false,
|
| 934 |
+
"normalized": false,
|
| 935 |
+
"rstrip": false,
|
| 936 |
+
"single_word": false,
|
| 937 |
+
"special": true
|
| 938 |
+
},
|
| 939 |
+
"128117": {
|
| 940 |
+
"content": "<|reserved_special_token_109|>",
|
| 941 |
+
"lstrip": false,
|
| 942 |
+
"normalized": false,
|
| 943 |
+
"rstrip": false,
|
| 944 |
+
"single_word": false,
|
| 945 |
+
"special": true
|
| 946 |
+
},
|
| 947 |
+
"128118": {
|
| 948 |
+
"content": "<|reserved_special_token_110|>",
|
| 949 |
+
"lstrip": false,
|
| 950 |
+
"normalized": false,
|
| 951 |
+
"rstrip": false,
|
| 952 |
+
"single_word": false,
|
| 953 |
+
"special": true
|
| 954 |
+
},
|
| 955 |
+
"128119": {
|
| 956 |
+
"content": "<|reserved_special_token_111|>",
|
| 957 |
+
"lstrip": false,
|
| 958 |
+
"normalized": false,
|
| 959 |
+
"rstrip": false,
|
| 960 |
+
"single_word": false,
|
| 961 |
+
"special": true
|
| 962 |
+
},
|
| 963 |
+
"128120": {
|
| 964 |
+
"content": "<|reserved_special_token_112|>",
|
| 965 |
+
"lstrip": false,
|
| 966 |
+
"normalized": false,
|
| 967 |
+
"rstrip": false,
|
| 968 |
+
"single_word": false,
|
| 969 |
+
"special": true
|
| 970 |
+
},
|
| 971 |
+
"128121": {
|
| 972 |
+
"content": "<|reserved_special_token_113|>",
|
| 973 |
+
"lstrip": false,
|
| 974 |
+
"normalized": false,
|
| 975 |
+
"rstrip": false,
|
| 976 |
+
"single_word": false,
|
| 977 |
+
"special": true
|
| 978 |
+
},
|
| 979 |
+
"128122": {
|
| 980 |
+
"content": "<|reserved_special_token_114|>",
|
| 981 |
+
"lstrip": false,
|
| 982 |
+
"normalized": false,
|
| 983 |
+
"rstrip": false,
|
| 984 |
+
"single_word": false,
|
| 985 |
+
"special": true
|
| 986 |
+
},
|
| 987 |
+
"128123": {
|
| 988 |
+
"content": "<|reserved_special_token_115|>",
|
| 989 |
+
"lstrip": false,
|
| 990 |
+
"normalized": false,
|
| 991 |
+
"rstrip": false,
|
| 992 |
+
"single_word": false,
|
| 993 |
+
"special": true
|
| 994 |
+
},
|
| 995 |
+
"128124": {
|
| 996 |
+
"content": "<|reserved_special_token_116|>",
|
| 997 |
+
"lstrip": false,
|
| 998 |
+
"normalized": false,
|
| 999 |
+
"rstrip": false,
|
| 1000 |
+
"single_word": false,
|
| 1001 |
+
"special": true
|
| 1002 |
+
},
|
| 1003 |
+
"128125": {
|
| 1004 |
+
"content": "<|reserved_special_token_117|>",
|
| 1005 |
+
"lstrip": false,
|
| 1006 |
+
"normalized": false,
|
| 1007 |
+
"rstrip": false,
|
| 1008 |
+
"single_word": false,
|
| 1009 |
+
"special": true
|
| 1010 |
+
},
|
| 1011 |
+
"128126": {
|
| 1012 |
+
"content": "<|reserved_special_token_118|>",
|
| 1013 |
+
"lstrip": false,
|
| 1014 |
+
"normalized": false,
|
| 1015 |
+
"rstrip": false,
|
| 1016 |
+
"single_word": false,
|
| 1017 |
+
"special": true
|
| 1018 |
+
},
|
| 1019 |
+
"128127": {
|
| 1020 |
+
"content": "<|reserved_special_token_119|>",
|
| 1021 |
+
"lstrip": false,
|
| 1022 |
+
"normalized": false,
|
| 1023 |
+
"rstrip": false,
|
| 1024 |
+
"single_word": false,
|
| 1025 |
+
"special": true
|
| 1026 |
+
},
|
| 1027 |
+
"128128": {
|
| 1028 |
+
"content": "<|reserved_special_token_120|>",
|
| 1029 |
+
"lstrip": false,
|
| 1030 |
+
"normalized": false,
|
| 1031 |
+
"rstrip": false,
|
| 1032 |
+
"single_word": false,
|
| 1033 |
+
"special": true
|
| 1034 |
+
},
|
| 1035 |
+
"128129": {
|
| 1036 |
+
"content": "<|reserved_special_token_121|>",
|
| 1037 |
+
"lstrip": false,
|
| 1038 |
+
"normalized": false,
|
| 1039 |
+
"rstrip": false,
|
| 1040 |
+
"single_word": false,
|
| 1041 |
+
"special": true
|
| 1042 |
+
},
|
| 1043 |
+
"128130": {
|
| 1044 |
+
"content": "<|reserved_special_token_122|>",
|
| 1045 |
+
"lstrip": false,
|
| 1046 |
+
"normalized": false,
|
| 1047 |
+
"rstrip": false,
|
| 1048 |
+
"single_word": false,
|
| 1049 |
+
"special": true
|
| 1050 |
+
},
|
| 1051 |
+
"128131": {
|
| 1052 |
+
"content": "<|reserved_special_token_123|>",
|
| 1053 |
+
"lstrip": false,
|
| 1054 |
+
"normalized": false,
|
| 1055 |
+
"rstrip": false,
|
| 1056 |
+
"single_word": false,
|
| 1057 |
+
"special": true
|
| 1058 |
+
},
|
| 1059 |
+
"128132": {
|
| 1060 |
+
"content": "<|reserved_special_token_124|>",
|
| 1061 |
+
"lstrip": false,
|
| 1062 |
+
"normalized": false,
|
| 1063 |
+
"rstrip": false,
|
| 1064 |
+
"single_word": false,
|
| 1065 |
+
"special": true
|
| 1066 |
+
},
|
| 1067 |
+
"128133": {
|
| 1068 |
+
"content": "<|reserved_special_token_125|>",
|
| 1069 |
+
"lstrip": false,
|
| 1070 |
+
"normalized": false,
|
| 1071 |
+
"rstrip": false,
|
| 1072 |
+
"single_word": false,
|
| 1073 |
+
"special": true
|
| 1074 |
+
},
|
| 1075 |
+
"128134": {
|
| 1076 |
+
"content": "<|reserved_special_token_126|>",
|
| 1077 |
+
"lstrip": false,
|
| 1078 |
+
"normalized": false,
|
| 1079 |
+
"rstrip": false,
|
| 1080 |
+
"single_word": false,
|
| 1081 |
+
"special": true
|
| 1082 |
+
},
|
| 1083 |
+
"128135": {
|
| 1084 |
+
"content": "<|reserved_special_token_127|>",
|
| 1085 |
+
"lstrip": false,
|
| 1086 |
+
"normalized": false,
|
| 1087 |
+
"rstrip": false,
|
| 1088 |
+
"single_word": false,
|
| 1089 |
+
"special": true
|
| 1090 |
+
},
|
| 1091 |
+
"128136": {
|
| 1092 |
+
"content": "<|reserved_special_token_128|>",
|
| 1093 |
+
"lstrip": false,
|
| 1094 |
+
"normalized": false,
|
| 1095 |
+
"rstrip": false,
|
| 1096 |
+
"single_word": false,
|
| 1097 |
+
"special": true
|
| 1098 |
+
},
|
| 1099 |
+
"128137": {
|
| 1100 |
+
"content": "<|reserved_special_token_129|>",
|
| 1101 |
+
"lstrip": false,
|
| 1102 |
+
"normalized": false,
|
| 1103 |
+
"rstrip": false,
|
| 1104 |
+
"single_word": false,
|
| 1105 |
+
"special": true
|
| 1106 |
+
},
|
| 1107 |
+
"128138": {
|
| 1108 |
+
"content": "<|reserved_special_token_130|>",
|
| 1109 |
+
"lstrip": false,
|
| 1110 |
+
"normalized": false,
|
| 1111 |
+
"rstrip": false,
|
| 1112 |
+
"single_word": false,
|
| 1113 |
+
"special": true
|
| 1114 |
+
},
|
| 1115 |
+
"128139": {
|
| 1116 |
+
"content": "<|reserved_special_token_131|>",
|
| 1117 |
+
"lstrip": false,
|
| 1118 |
+
"normalized": false,
|
| 1119 |
+
"rstrip": false,
|
| 1120 |
+
"single_word": false,
|
| 1121 |
+
"special": true
|
| 1122 |
+
},
|
| 1123 |
+
"128140": {
|
| 1124 |
+
"content": "<|reserved_special_token_132|>",
|
| 1125 |
+
"lstrip": false,
|
| 1126 |
+
"normalized": false,
|
| 1127 |
+
"rstrip": false,
|
| 1128 |
+
"single_word": false,
|
| 1129 |
+
"special": true
|
| 1130 |
+
},
|
| 1131 |
+
"128141": {
|
| 1132 |
+
"content": "<|reserved_special_token_133|>",
|
| 1133 |
+
"lstrip": false,
|
| 1134 |
+
"normalized": false,
|
| 1135 |
+
"rstrip": false,
|
| 1136 |
+
"single_word": false,
|
| 1137 |
+
"special": true
|
| 1138 |
+
},
|
| 1139 |
+
"128142": {
|
| 1140 |
+
"content": "<|reserved_special_token_134|>",
|
| 1141 |
+
"lstrip": false,
|
| 1142 |
+
"normalized": false,
|
| 1143 |
+
"rstrip": false,
|
| 1144 |
+
"single_word": false,
|
| 1145 |
+
"special": true
|
| 1146 |
+
},
|
| 1147 |
+
"128143": {
|
| 1148 |
+
"content": "<|reserved_special_token_135|>",
|
| 1149 |
+
"lstrip": false,
|
| 1150 |
+
"normalized": false,
|
| 1151 |
+
"rstrip": false,
|
| 1152 |
+
"single_word": false,
|
| 1153 |
+
"special": true
|
| 1154 |
+
},
|
| 1155 |
+
"128144": {
|
| 1156 |
+
"content": "<|reserved_special_token_136|>",
|
| 1157 |
+
"lstrip": false,
|
| 1158 |
+
"normalized": false,
|
| 1159 |
+
"rstrip": false,
|
| 1160 |
+
"single_word": false,
|
| 1161 |
+
"special": true
|
| 1162 |
+
},
|
| 1163 |
+
"128145": {
|
| 1164 |
+
"content": "<|reserved_special_token_137|>",
|
| 1165 |
+
"lstrip": false,
|
| 1166 |
+
"normalized": false,
|
| 1167 |
+
"rstrip": false,
|
| 1168 |
+
"single_word": false,
|
| 1169 |
+
"special": true
|
| 1170 |
+
},
|
| 1171 |
+
"128146": {
|
| 1172 |
+
"content": "<|reserved_special_token_138|>",
|
| 1173 |
+
"lstrip": false,
|
| 1174 |
+
"normalized": false,
|
| 1175 |
+
"rstrip": false,
|
| 1176 |
+
"single_word": false,
|
| 1177 |
+
"special": true
|
| 1178 |
+
},
|
| 1179 |
+
"128147": {
|
| 1180 |
+
"content": "<|reserved_special_token_139|>",
|
| 1181 |
+
"lstrip": false,
|
| 1182 |
+
"normalized": false,
|
| 1183 |
+
"rstrip": false,
|
| 1184 |
+
"single_word": false,
|
| 1185 |
+
"special": true
|
| 1186 |
+
},
|
| 1187 |
+
"128148": {
|
| 1188 |
+
"content": "<|reserved_special_token_140|>",
|
| 1189 |
+
"lstrip": false,
|
| 1190 |
+
"normalized": false,
|
| 1191 |
+
"rstrip": false,
|
| 1192 |
+
"single_word": false,
|
| 1193 |
+
"special": true
|
| 1194 |
+
},
|
| 1195 |
+
"128149": {
|
| 1196 |
+
"content": "<|reserved_special_token_141|>",
|
| 1197 |
+
"lstrip": false,
|
| 1198 |
+
"normalized": false,
|
| 1199 |
+
"rstrip": false,
|
| 1200 |
+
"single_word": false,
|
| 1201 |
+
"special": true
|
| 1202 |
+
},
|
| 1203 |
+
"128150": {
|
| 1204 |
+
"content": "<|reserved_special_token_142|>",
|
| 1205 |
+
"lstrip": false,
|
| 1206 |
+
"normalized": false,
|
| 1207 |
+
"rstrip": false,
|
| 1208 |
+
"single_word": false,
|
| 1209 |
+
"special": true
|
| 1210 |
+
},
|
| 1211 |
+
"128151": {
|
| 1212 |
+
"content": "<|reserved_special_token_143|>",
|
| 1213 |
+
"lstrip": false,
|
| 1214 |
+
"normalized": false,
|
| 1215 |
+
"rstrip": false,
|
| 1216 |
+
"single_word": false,
|
| 1217 |
+
"special": true
|
| 1218 |
+
},
|
| 1219 |
+
"128152": {
|
| 1220 |
+
"content": "<|reserved_special_token_144|>",
|
| 1221 |
+
"lstrip": false,
|
| 1222 |
+
"normalized": false,
|
| 1223 |
+
"rstrip": false,
|
| 1224 |
+
"single_word": false,
|
| 1225 |
+
"special": true
|
| 1226 |
+
},
|
| 1227 |
+
"128153": {
|
| 1228 |
+
"content": "<|reserved_special_token_145|>",
|
| 1229 |
+
"lstrip": false,
|
| 1230 |
+
"normalized": false,
|
| 1231 |
+
"rstrip": false,
|
| 1232 |
+
"single_word": false,
|
| 1233 |
+
"special": true
|
| 1234 |
+
},
|
| 1235 |
+
"128154": {
|
| 1236 |
+
"content": "<|reserved_special_token_146|>",
|
| 1237 |
+
"lstrip": false,
|
| 1238 |
+
"normalized": false,
|
| 1239 |
+
"rstrip": false,
|
| 1240 |
+
"single_word": false,
|
| 1241 |
+
"special": true
|
| 1242 |
+
},
|
| 1243 |
+
"128155": {
|
| 1244 |
+
"content": "<|reserved_special_token_147|>",
|
| 1245 |
+
"lstrip": false,
|
| 1246 |
+
"normalized": false,
|
| 1247 |
+
"rstrip": false,
|
| 1248 |
+
"single_word": false,
|
| 1249 |
+
"special": true
|
| 1250 |
+
},
|
| 1251 |
+
"128156": {
|
| 1252 |
+
"content": "<|reserved_special_token_148|>",
|
| 1253 |
+
"lstrip": false,
|
| 1254 |
+
"normalized": false,
|
| 1255 |
+
"rstrip": false,
|
| 1256 |
+
"single_word": false,
|
| 1257 |
+
"special": true
|
| 1258 |
+
},
|
| 1259 |
+
"128157": {
|
| 1260 |
+
"content": "<|reserved_special_token_149|>",
|
| 1261 |
+
"lstrip": false,
|
| 1262 |
+
"normalized": false,
|
| 1263 |
+
"rstrip": false,
|
| 1264 |
+
"single_word": false,
|
| 1265 |
+
"special": true
|
| 1266 |
+
},
|
| 1267 |
+
"128158": {
|
| 1268 |
+
"content": "<|reserved_special_token_150|>",
|
| 1269 |
+
"lstrip": false,
|
| 1270 |
+
"normalized": false,
|
| 1271 |
+
"rstrip": false,
|
| 1272 |
+
"single_word": false,
|
| 1273 |
+
"special": true
|
| 1274 |
+
},
|
| 1275 |
+
"128159": {
|
| 1276 |
+
"content": "<|reserved_special_token_151|>",
|
| 1277 |
+
"lstrip": false,
|
| 1278 |
+
"normalized": false,
|
| 1279 |
+
"rstrip": false,
|
| 1280 |
+
"single_word": false,
|
| 1281 |
+
"special": true
|
| 1282 |
+
},
|
| 1283 |
+
"128160": {
|
| 1284 |
+
"content": "<|reserved_special_token_152|>",
|
| 1285 |
+
"lstrip": false,
|
| 1286 |
+
"normalized": false,
|
| 1287 |
+
"rstrip": false,
|
| 1288 |
+
"single_word": false,
|
| 1289 |
+
"special": true
|
| 1290 |
+
},
|
| 1291 |
+
"128161": {
|
| 1292 |
+
"content": "<|reserved_special_token_153|>",
|
| 1293 |
+
"lstrip": false,
|
| 1294 |
+
"normalized": false,
|
| 1295 |
+
"rstrip": false,
|
| 1296 |
+
"single_word": false,
|
| 1297 |
+
"special": true
|
| 1298 |
+
},
|
| 1299 |
+
"128162": {
|
| 1300 |
+
"content": "<|reserved_special_token_154|>",
|
| 1301 |
+
"lstrip": false,
|
| 1302 |
+
"normalized": false,
|
| 1303 |
+
"rstrip": false,
|
| 1304 |
+
"single_word": false,
|
| 1305 |
+
"special": true
|
| 1306 |
+
},
|
| 1307 |
+
"128163": {
|
| 1308 |
+
"content": "<|reserved_special_token_155|>",
|
| 1309 |
+
"lstrip": false,
|
| 1310 |
+
"normalized": false,
|
| 1311 |
+
"rstrip": false,
|
| 1312 |
+
"single_word": false,
|
| 1313 |
+
"special": true
|
| 1314 |
+
},
|
| 1315 |
+
"128164": {
|
| 1316 |
+
"content": "<|reserved_special_token_156|>",
|
| 1317 |
+
"lstrip": false,
|
| 1318 |
+
"normalized": false,
|
| 1319 |
+
"rstrip": false,
|
| 1320 |
+
"single_word": false,
|
| 1321 |
+
"special": true
|
| 1322 |
+
},
|
| 1323 |
+
"128165": {
|
| 1324 |
+
"content": "<|reserved_special_token_157|>",
|
| 1325 |
+
"lstrip": false,
|
| 1326 |
+
"normalized": false,
|
| 1327 |
+
"rstrip": false,
|
| 1328 |
+
"single_word": false,
|
| 1329 |
+
"special": true
|
| 1330 |
+
},
|
| 1331 |
+
"128166": {
|
| 1332 |
+
"content": "<|reserved_special_token_158|>",
|
| 1333 |
+
"lstrip": false,
|
| 1334 |
+
"normalized": false,
|
| 1335 |
+
"rstrip": false,
|
| 1336 |
+
"single_word": false,
|
| 1337 |
+
"special": true
|
| 1338 |
+
},
|
| 1339 |
+
"128167": {
|
| 1340 |
+
"content": "<|reserved_special_token_159|>",
|
| 1341 |
+
"lstrip": false,
|
| 1342 |
+
"normalized": false,
|
| 1343 |
+
"rstrip": false,
|
| 1344 |
+
"single_word": false,
|
| 1345 |
+
"special": true
|
| 1346 |
+
},
|
| 1347 |
+
"128168": {
|
| 1348 |
+
"content": "<|reserved_special_token_160|>",
|
| 1349 |
+
"lstrip": false,
|
| 1350 |
+
"normalized": false,
|
| 1351 |
+
"rstrip": false,
|
| 1352 |
+
"single_word": false,
|
| 1353 |
+
"special": true
|
| 1354 |
+
},
|
| 1355 |
+
"128169": {
|
| 1356 |
+
"content": "<|reserved_special_token_161|>",
|
| 1357 |
+
"lstrip": false,
|
| 1358 |
+
"normalized": false,
|
| 1359 |
+
"rstrip": false,
|
| 1360 |
+
"single_word": false,
|
| 1361 |
+
"special": true
|
| 1362 |
+
},
|
| 1363 |
+
"128170": {
|
| 1364 |
+
"content": "<|reserved_special_token_162|>",
|
| 1365 |
+
"lstrip": false,
|
| 1366 |
+
"normalized": false,
|
| 1367 |
+
"rstrip": false,
|
| 1368 |
+
"single_word": false,
|
| 1369 |
+
"special": true
|
| 1370 |
+
},
|
| 1371 |
+
"128171": {
|
| 1372 |
+
"content": "<|reserved_special_token_163|>",
|
| 1373 |
+
"lstrip": false,
|
| 1374 |
+
"normalized": false,
|
| 1375 |
+
"rstrip": false,
|
| 1376 |
+
"single_word": false,
|
| 1377 |
+
"special": true
|
| 1378 |
+
},
|
| 1379 |
+
"128172": {
|
| 1380 |
+
"content": "<|reserved_special_token_164|>",
|
| 1381 |
+
"lstrip": false,
|
| 1382 |
+
"normalized": false,
|
| 1383 |
+
"rstrip": false,
|
| 1384 |
+
"single_word": false,
|
| 1385 |
+
"special": true
|
| 1386 |
+
},
|
| 1387 |
+
"128173": {
|
| 1388 |
+
"content": "<|reserved_special_token_165|>",
|
| 1389 |
+
"lstrip": false,
|
| 1390 |
+
"normalized": false,
|
| 1391 |
+
"rstrip": false,
|
| 1392 |
+
"single_word": false,
|
| 1393 |
+
"special": true
|
| 1394 |
+
},
|
| 1395 |
+
"128174": {
|
| 1396 |
+
"content": "<|reserved_special_token_166|>",
|
| 1397 |
+
"lstrip": false,
|
| 1398 |
+
"normalized": false,
|
| 1399 |
+
"rstrip": false,
|
| 1400 |
+
"single_word": false,
|
| 1401 |
+
"special": true
|
| 1402 |
+
},
|
| 1403 |
+
"128175": {
|
| 1404 |
+
"content": "<|reserved_special_token_167|>",
|
| 1405 |
+
"lstrip": false,
|
| 1406 |
+
"normalized": false,
|
| 1407 |
+
"rstrip": false,
|
| 1408 |
+
"single_word": false,
|
| 1409 |
+
"special": true
|
| 1410 |
+
},
|
| 1411 |
+
"128176": {
|
| 1412 |
+
"content": "<|reserved_special_token_168|>",
|
| 1413 |
+
"lstrip": false,
|
| 1414 |
+
"normalized": false,
|
| 1415 |
+
"rstrip": false,
|
| 1416 |
+
"single_word": false,
|
| 1417 |
+
"special": true
|
| 1418 |
+
},
|
| 1419 |
+
"128177": {
|
| 1420 |
+
"content": "<|reserved_special_token_169|>",
|
| 1421 |
+
"lstrip": false,
|
| 1422 |
+
"normalized": false,
|
| 1423 |
+
"rstrip": false,
|
| 1424 |
+
"single_word": false,
|
| 1425 |
+
"special": true
|
| 1426 |
+
},
|
| 1427 |
+
"128178": {
|
| 1428 |
+
"content": "<|reserved_special_token_170|>",
|
| 1429 |
+
"lstrip": false,
|
| 1430 |
+
"normalized": false,
|
| 1431 |
+
"rstrip": false,
|
| 1432 |
+
"single_word": false,
|
| 1433 |
+
"special": true
|
| 1434 |
+
},
|
| 1435 |
+
"128179": {
|
| 1436 |
+
"content": "<|reserved_special_token_171|>",
|
| 1437 |
+
"lstrip": false,
|
| 1438 |
+
"normalized": false,
|
| 1439 |
+
"rstrip": false,
|
| 1440 |
+
"single_word": false,
|
| 1441 |
+
"special": true
|
| 1442 |
+
},
|
| 1443 |
+
"128180": {
|
| 1444 |
+
"content": "<|reserved_special_token_172|>",
|
| 1445 |
+
"lstrip": false,
|
| 1446 |
+
"normalized": false,
|
| 1447 |
+
"rstrip": false,
|
| 1448 |
+
"single_word": false,
|
| 1449 |
+
"special": true
|
| 1450 |
+
},
|
| 1451 |
+
"128181": {
|
| 1452 |
+
"content": "<|reserved_special_token_173|>",
|
| 1453 |
+
"lstrip": false,
|
| 1454 |
+
"normalized": false,
|
| 1455 |
+
"rstrip": false,
|
| 1456 |
+
"single_word": false,
|
| 1457 |
+
"special": true
|
| 1458 |
+
},
|
| 1459 |
+
"128182": {
|
| 1460 |
+
"content": "<|reserved_special_token_174|>",
|
| 1461 |
+
"lstrip": false,
|
| 1462 |
+
"normalized": false,
|
| 1463 |
+
"rstrip": false,
|
| 1464 |
+
"single_word": false,
|
| 1465 |
+
"special": true
|
| 1466 |
+
},
|
| 1467 |
+
"128183": {
|
| 1468 |
+
"content": "<|reserved_special_token_175|>",
|
| 1469 |
+
"lstrip": false,
|
| 1470 |
+
"normalized": false,
|
| 1471 |
+
"rstrip": false,
|
| 1472 |
+
"single_word": false,
|
| 1473 |
+
"special": true
|
| 1474 |
+
},
|
| 1475 |
+
"128184": {
|
| 1476 |
+
"content": "<|reserved_special_token_176|>",
|
| 1477 |
+
"lstrip": false,
|
| 1478 |
+
"normalized": false,
|
| 1479 |
+
"rstrip": false,
|
| 1480 |
+
"single_word": false,
|
| 1481 |
+
"special": true
|
| 1482 |
+
},
|
| 1483 |
+
"128185": {
|
| 1484 |
+
"content": "<|reserved_special_token_177|>",
|
| 1485 |
+
"lstrip": false,
|
| 1486 |
+
"normalized": false,
|
| 1487 |
+
"rstrip": false,
|
| 1488 |
+
"single_word": false,
|
| 1489 |
+
"special": true
|
| 1490 |
+
},
|
| 1491 |
+
"128186": {
|
| 1492 |
+
"content": "<|reserved_special_token_178|>",
|
| 1493 |
+
"lstrip": false,
|
| 1494 |
+
"normalized": false,
|
| 1495 |
+
"rstrip": false,
|
| 1496 |
+
"single_word": false,
|
| 1497 |
+
"special": true
|
| 1498 |
+
},
|
| 1499 |
+
"128187": {
|
| 1500 |
+
"content": "<|reserved_special_token_179|>",
|
| 1501 |
+
"lstrip": false,
|
| 1502 |
+
"normalized": false,
|
| 1503 |
+
"rstrip": false,
|
| 1504 |
+
"single_word": false,
|
| 1505 |
+
"special": true
|
| 1506 |
+
},
|
| 1507 |
+
"128188": {
|
| 1508 |
+
"content": "<|reserved_special_token_180|>",
|
| 1509 |
+
"lstrip": false,
|
| 1510 |
+
"normalized": false,
|
| 1511 |
+
"rstrip": false,
|
| 1512 |
+
"single_word": false,
|
| 1513 |
+
"special": true
|
| 1514 |
+
},
|
| 1515 |
+
"128189": {
|
| 1516 |
+
"content": "<|reserved_special_token_181|>",
|
| 1517 |
+
"lstrip": false,
|
| 1518 |
+
"normalized": false,
|
| 1519 |
+
"rstrip": false,
|
| 1520 |
+
"single_word": false,
|
| 1521 |
+
"special": true
|
| 1522 |
+
},
|
| 1523 |
+
"128190": {
|
| 1524 |
+
"content": "<|reserved_special_token_182|>",
|
| 1525 |
+
"lstrip": false,
|
| 1526 |
+
"normalized": false,
|
| 1527 |
+
"rstrip": false,
|
| 1528 |
+
"single_word": false,
|
| 1529 |
+
"special": true
|
| 1530 |
+
},
|
| 1531 |
+
"128191": {
|
| 1532 |
+
"content": "<|reserved_special_token_183|>",
|
| 1533 |
+
"lstrip": false,
|
| 1534 |
+
"normalized": false,
|
| 1535 |
+
"rstrip": false,
|
| 1536 |
+
"single_word": false,
|
| 1537 |
+
"special": true
|
| 1538 |
+
},
|
| 1539 |
+
"128192": {
|
| 1540 |
+
"content": "<|reserved_special_token_184|>",
|
| 1541 |
+
"lstrip": false,
|
| 1542 |
+
"normalized": false,
|
| 1543 |
+
"rstrip": false,
|
| 1544 |
+
"single_word": false,
|
| 1545 |
+
"special": true
|
| 1546 |
+
},
|
| 1547 |
+
"128193": {
|
| 1548 |
+
"content": "<|reserved_special_token_185|>",
|
| 1549 |
+
"lstrip": false,
|
| 1550 |
+
"normalized": false,
|
| 1551 |
+
"rstrip": false,
|
| 1552 |
+
"single_word": false,
|
| 1553 |
+
"special": true
|
| 1554 |
+
},
|
| 1555 |
+
"128194": {
|
| 1556 |
+
"content": "<|reserved_special_token_186|>",
|
| 1557 |
+
"lstrip": false,
|
| 1558 |
+
"normalized": false,
|
| 1559 |
+
"rstrip": false,
|
| 1560 |
+
"single_word": false,
|
| 1561 |
+
"special": true
|
| 1562 |
+
},
|
| 1563 |
+
"128195": {
|
| 1564 |
+
"content": "<|reserved_special_token_187|>",
|
| 1565 |
+
"lstrip": false,
|
| 1566 |
+
"normalized": false,
|
| 1567 |
+
"rstrip": false,
|
| 1568 |
+
"single_word": false,
|
| 1569 |
+
"special": true
|
| 1570 |
+
},
|
| 1571 |
+
"128196": {
|
| 1572 |
+
"content": "<|reserved_special_token_188|>",
|
| 1573 |
+
"lstrip": false,
|
| 1574 |
+
"normalized": false,
|
| 1575 |
+
"rstrip": false,
|
| 1576 |
+
"single_word": false,
|
| 1577 |
+
"special": true
|
| 1578 |
+
},
|
| 1579 |
+
"128197": {
|
| 1580 |
+
"content": "<|reserved_special_token_189|>",
|
| 1581 |
+
"lstrip": false,
|
| 1582 |
+
"normalized": false,
|
| 1583 |
+
"rstrip": false,
|
| 1584 |
+
"single_word": false,
|
| 1585 |
+
"special": true
|
| 1586 |
+
},
|
| 1587 |
+
"128198": {
|
| 1588 |
+
"content": "<|reserved_special_token_190|>",
|
| 1589 |
+
"lstrip": false,
|
| 1590 |
+
"normalized": false,
|
| 1591 |
+
"rstrip": false,
|
| 1592 |
+
"single_word": false,
|
| 1593 |
+
"special": true
|
| 1594 |
+
},
|
| 1595 |
+
"128199": {
|
| 1596 |
+
"content": "<|reserved_special_token_191|>",
|
| 1597 |
+
"lstrip": false,
|
| 1598 |
+
"normalized": false,
|
| 1599 |
+
"rstrip": false,
|
| 1600 |
+
"single_word": false,
|
| 1601 |
+
"special": true
|
| 1602 |
+
},
|
| 1603 |
+
"128200": {
|
| 1604 |
+
"content": "<|reserved_special_token_192|>",
|
| 1605 |
+
"lstrip": false,
|
| 1606 |
+
"normalized": false,
|
| 1607 |
+
"rstrip": false,
|
| 1608 |
+
"single_word": false,
|
| 1609 |
+
"special": true
|
| 1610 |
+
},
|
| 1611 |
+
"128201": {
|
| 1612 |
+
"content": "<|reserved_special_token_193|>",
|
| 1613 |
+
"lstrip": false,
|
| 1614 |
+
"normalized": false,
|
| 1615 |
+
"rstrip": false,
|
| 1616 |
+
"single_word": false,
|
| 1617 |
+
"special": true
|
| 1618 |
+
},
|
| 1619 |
+
"128202": {
|
| 1620 |
+
"content": "<|reserved_special_token_194|>",
|
| 1621 |
+
"lstrip": false,
|
| 1622 |
+
"normalized": false,
|
| 1623 |
+
"rstrip": false,
|
| 1624 |
+
"single_word": false,
|
| 1625 |
+
"special": true
|
| 1626 |
+
},
|
| 1627 |
+
"128203": {
|
| 1628 |
+
"content": "<|reserved_special_token_195|>",
|
| 1629 |
+
"lstrip": false,
|
| 1630 |
+
"normalized": false,
|
| 1631 |
+
"rstrip": false,
|
| 1632 |
+
"single_word": false,
|
| 1633 |
+
"special": true
|
| 1634 |
+
},
|
| 1635 |
+
"128204": {
|
| 1636 |
+
"content": "<|reserved_special_token_196|>",
|
| 1637 |
+
"lstrip": false,
|
| 1638 |
+
"normalized": false,
|
| 1639 |
+
"rstrip": false,
|
| 1640 |
+
"single_word": false,
|
| 1641 |
+
"special": true
|
| 1642 |
+
},
|
| 1643 |
+
"128205": {
|
| 1644 |
+
"content": "<|reserved_special_token_197|>",
|
| 1645 |
+
"lstrip": false,
|
| 1646 |
+
"normalized": false,
|
| 1647 |
+
"rstrip": false,
|
| 1648 |
+
"single_word": false,
|
| 1649 |
+
"special": true
|
| 1650 |
+
},
|
| 1651 |
+
"128206": {
|
| 1652 |
+
"content": "<|reserved_special_token_198|>",
|
| 1653 |
+
"lstrip": false,
|
| 1654 |
+
"normalized": false,
|
| 1655 |
+
"rstrip": false,
|
| 1656 |
+
"single_word": false,
|
| 1657 |
+
"special": true
|
| 1658 |
+
},
|
| 1659 |
+
"128207": {
|
| 1660 |
+
"content": "<|reserved_special_token_199|>",
|
| 1661 |
+
"lstrip": false,
|
| 1662 |
+
"normalized": false,
|
| 1663 |
+
"rstrip": false,
|
| 1664 |
+
"single_word": false,
|
| 1665 |
+
"special": true
|
| 1666 |
+
},
|
| 1667 |
+
"128208": {
|
| 1668 |
+
"content": "<|reserved_special_token_200|>",
|
| 1669 |
+
"lstrip": false,
|
| 1670 |
+
"normalized": false,
|
| 1671 |
+
"rstrip": false,
|
| 1672 |
+
"single_word": false,
|
| 1673 |
+
"special": true
|
| 1674 |
+
},
|
| 1675 |
+
"128209": {
|
| 1676 |
+
"content": "<|reserved_special_token_201|>",
|
| 1677 |
+
"lstrip": false,
|
| 1678 |
+
"normalized": false,
|
| 1679 |
+
"rstrip": false,
|
| 1680 |
+
"single_word": false,
|
| 1681 |
+
"special": true
|
| 1682 |
+
},
|
| 1683 |
+
"128210": {
|
| 1684 |
+
"content": "<|reserved_special_token_202|>",
|
| 1685 |
+
"lstrip": false,
|
| 1686 |
+
"normalized": false,
|
| 1687 |
+
"rstrip": false,
|
| 1688 |
+
"single_word": false,
|
| 1689 |
+
"special": true
|
| 1690 |
+
},
|
| 1691 |
+
"128211": {
|
| 1692 |
+
"content": "<|reserved_special_token_203|>",
|
| 1693 |
+
"lstrip": false,
|
| 1694 |
+
"normalized": false,
|
| 1695 |
+
"rstrip": false,
|
| 1696 |
+
"single_word": false,
|
| 1697 |
+
"special": true
|
| 1698 |
+
},
|
| 1699 |
+
"128212": {
|
| 1700 |
+
"content": "<|reserved_special_token_204|>",
|
| 1701 |
+
"lstrip": false,
|
| 1702 |
+
"normalized": false,
|
| 1703 |
+
"rstrip": false,
|
| 1704 |
+
"single_word": false,
|
| 1705 |
+
"special": true
|
| 1706 |
+
},
|
| 1707 |
+
"128213": {
|
| 1708 |
+
"content": "<|reserved_special_token_205|>",
|
| 1709 |
+
"lstrip": false,
|
| 1710 |
+
"normalized": false,
|
| 1711 |
+
"rstrip": false,
|
| 1712 |
+
"single_word": false,
|
| 1713 |
+
"special": true
|
| 1714 |
+
},
|
| 1715 |
+
"128214": {
|
| 1716 |
+
"content": "<|reserved_special_token_206|>",
|
| 1717 |
+
"lstrip": false,
|
| 1718 |
+
"normalized": false,
|
| 1719 |
+
"rstrip": false,
|
| 1720 |
+
"single_word": false,
|
| 1721 |
+
"special": true
|
| 1722 |
+
},
|
| 1723 |
+
"128215": {
|
| 1724 |
+
"content": "<|reserved_special_token_207|>",
|
| 1725 |
+
"lstrip": false,
|
| 1726 |
+
"normalized": false,
|
| 1727 |
+
"rstrip": false,
|
| 1728 |
+
"single_word": false,
|
| 1729 |
+
"special": true
|
| 1730 |
+
},
|
| 1731 |
+
"128216": {
|
| 1732 |
+
"content": "<|reserved_special_token_208|>",
|
| 1733 |
+
"lstrip": false,
|
| 1734 |
+
"normalized": false,
|
| 1735 |
+
"rstrip": false,
|
| 1736 |
+
"single_word": false,
|
| 1737 |
+
"special": true
|
| 1738 |
+
},
|
| 1739 |
+
"128217": {
|
| 1740 |
+
"content": "<|reserved_special_token_209|>",
|
| 1741 |
+
"lstrip": false,
|
| 1742 |
+
"normalized": false,
|
| 1743 |
+
"rstrip": false,
|
| 1744 |
+
"single_word": false,
|
| 1745 |
+
"special": true
|
| 1746 |
+
},
|
| 1747 |
+
"128218": {
|
| 1748 |
+
"content": "<|reserved_special_token_210|>",
|
| 1749 |
+
"lstrip": false,
|
| 1750 |
+
"normalized": false,
|
| 1751 |
+
"rstrip": false,
|
| 1752 |
+
"single_word": false,
|
| 1753 |
+
"special": true
|
| 1754 |
+
},
|
| 1755 |
+
"128219": {
|
| 1756 |
+
"content": "<|reserved_special_token_211|>",
|
| 1757 |
+
"lstrip": false,
|
| 1758 |
+
"normalized": false,
|
| 1759 |
+
"rstrip": false,
|
| 1760 |
+
"single_word": false,
|
| 1761 |
+
"special": true
|
| 1762 |
+
},
|
| 1763 |
+
"128220": {
|
| 1764 |
+
"content": "<|reserved_special_token_212|>",
|
| 1765 |
+
"lstrip": false,
|
| 1766 |
+
"normalized": false,
|
| 1767 |
+
"rstrip": false,
|
| 1768 |
+
"single_word": false,
|
| 1769 |
+
"special": true
|
| 1770 |
+
},
|
| 1771 |
+
"128221": {
|
| 1772 |
+
"content": "<|reserved_special_token_213|>",
|
| 1773 |
+
"lstrip": false,
|
| 1774 |
+
"normalized": false,
|
| 1775 |
+
"rstrip": false,
|
| 1776 |
+
"single_word": false,
|
| 1777 |
+
"special": true
|
| 1778 |
+
},
|
| 1779 |
+
"128222": {
|
| 1780 |
+
"content": "<|reserved_special_token_214|>",
|
| 1781 |
+
"lstrip": false,
|
| 1782 |
+
"normalized": false,
|
| 1783 |
+
"rstrip": false,
|
| 1784 |
+
"single_word": false,
|
| 1785 |
+
"special": true
|
| 1786 |
+
},
|
| 1787 |
+
"128223": {
|
| 1788 |
+
"content": "<|reserved_special_token_215|>",
|
| 1789 |
+
"lstrip": false,
|
| 1790 |
+
"normalized": false,
|
| 1791 |
+
"rstrip": false,
|
| 1792 |
+
"single_word": false,
|
| 1793 |
+
"special": true
|
| 1794 |
+
},
|
| 1795 |
+
"128224": {
|
| 1796 |
+
"content": "<|reserved_special_token_216|>",
|
| 1797 |
+
"lstrip": false,
|
| 1798 |
+
"normalized": false,
|
| 1799 |
+
"rstrip": false,
|
| 1800 |
+
"single_word": false,
|
| 1801 |
+
"special": true
|
| 1802 |
+
},
|
| 1803 |
+
"128225": {
|
| 1804 |
+
"content": "<|reserved_special_token_217|>",
|
| 1805 |
+
"lstrip": false,
|
| 1806 |
+
"normalized": false,
|
| 1807 |
+
"rstrip": false,
|
| 1808 |
+
"single_word": false,
|
| 1809 |
+
"special": true
|
| 1810 |
+
},
|
| 1811 |
+
"128226": {
|
| 1812 |
+
"content": "<|reserved_special_token_218|>",
|
| 1813 |
+
"lstrip": false,
|
| 1814 |
+
"normalized": false,
|
| 1815 |
+
"rstrip": false,
|
| 1816 |
+
"single_word": false,
|
| 1817 |
+
"special": true
|
| 1818 |
+
},
|
| 1819 |
+
"128227": {
|
| 1820 |
+
"content": "<|reserved_special_token_219|>",
|
| 1821 |
+
"lstrip": false,
|
| 1822 |
+
"normalized": false,
|
| 1823 |
+
"rstrip": false,
|
| 1824 |
+
"single_word": false,
|
| 1825 |
+
"special": true
|
| 1826 |
+
},
|
| 1827 |
+
"128228": {
|
| 1828 |
+
"content": "<|reserved_special_token_220|>",
|
| 1829 |
+
"lstrip": false,
|
| 1830 |
+
"normalized": false,
|
| 1831 |
+
"rstrip": false,
|
| 1832 |
+
"single_word": false,
|
| 1833 |
+
"special": true
|
| 1834 |
+
},
|
| 1835 |
+
"128229": {
|
| 1836 |
+
"content": "<|reserved_special_token_221|>",
|
| 1837 |
+
"lstrip": false,
|
| 1838 |
+
"normalized": false,
|
| 1839 |
+
"rstrip": false,
|
| 1840 |
+
"single_word": false,
|
| 1841 |
+
"special": true
|
| 1842 |
+
},
|
| 1843 |
+
"128230": {
|
| 1844 |
+
"content": "<|reserved_special_token_222|>",
|
| 1845 |
+
"lstrip": false,
|
| 1846 |
+
"normalized": false,
|
| 1847 |
+
"rstrip": false,
|
| 1848 |
+
"single_word": false,
|
| 1849 |
+
"special": true
|
| 1850 |
+
},
|
| 1851 |
+
"128231": {
|
| 1852 |
+
"content": "<|reserved_special_token_223|>",
|
| 1853 |
+
"lstrip": false,
|
| 1854 |
+
"normalized": false,
|
| 1855 |
+
"rstrip": false,
|
| 1856 |
+
"single_word": false,
|
| 1857 |
+
"special": true
|
| 1858 |
+
},
|
| 1859 |
+
"128232": {
|
| 1860 |
+
"content": "<|reserved_special_token_224|>",
|
| 1861 |
+
"lstrip": false,
|
| 1862 |
+
"normalized": false,
|
| 1863 |
+
"rstrip": false,
|
| 1864 |
+
"single_word": false,
|
| 1865 |
+
"special": true
|
| 1866 |
+
},
|
| 1867 |
+
"128233": {
|
| 1868 |
+
"content": "<|reserved_special_token_225|>",
|
| 1869 |
+
"lstrip": false,
|
| 1870 |
+
"normalized": false,
|
| 1871 |
+
"rstrip": false,
|
| 1872 |
+
"single_word": false,
|
| 1873 |
+
"special": true
|
| 1874 |
+
},
|
| 1875 |
+
"128234": {
|
| 1876 |
+
"content": "<|reserved_special_token_226|>",
|
| 1877 |
+
"lstrip": false,
|
| 1878 |
+
"normalized": false,
|
| 1879 |
+
"rstrip": false,
|
| 1880 |
+
"single_word": false,
|
| 1881 |
+
"special": true
|
| 1882 |
+
},
|
| 1883 |
+
"128235": {
|
| 1884 |
+
"content": "<|reserved_special_token_227|>",
|
| 1885 |
+
"lstrip": false,
|
| 1886 |
+
"normalized": false,
|
| 1887 |
+
"rstrip": false,
|
| 1888 |
+
"single_word": false,
|
| 1889 |
+
"special": true
|
| 1890 |
+
},
|
| 1891 |
+
"128236": {
|
| 1892 |
+
"content": "<|reserved_special_token_228|>",
|
| 1893 |
+
"lstrip": false,
|
| 1894 |
+
"normalized": false,
|
| 1895 |
+
"rstrip": false,
|
| 1896 |
+
"single_word": false,
|
| 1897 |
+
"special": true
|
| 1898 |
+
},
|
| 1899 |
+
"128237": {
|
| 1900 |
+
"content": "<|reserved_special_token_229|>",
|
| 1901 |
+
"lstrip": false,
|
| 1902 |
+
"normalized": false,
|
| 1903 |
+
"rstrip": false,
|
| 1904 |
+
"single_word": false,
|
| 1905 |
+
"special": true
|
| 1906 |
+
},
|
| 1907 |
+
"128238": {
|
| 1908 |
+
"content": "<|reserved_special_token_230|>",
|
| 1909 |
+
"lstrip": false,
|
| 1910 |
+
"normalized": false,
|
| 1911 |
+
"rstrip": false,
|
| 1912 |
+
"single_word": false,
|
| 1913 |
+
"special": true
|
| 1914 |
+
},
|
| 1915 |
+
"128239": {
|
| 1916 |
+
"content": "<|reserved_special_token_231|>",
|
| 1917 |
+
"lstrip": false,
|
| 1918 |
+
"normalized": false,
|
| 1919 |
+
"rstrip": false,
|
| 1920 |
+
"single_word": false,
|
| 1921 |
+
"special": true
|
| 1922 |
+
},
|
| 1923 |
+
"128240": {
|
| 1924 |
+
"content": "<|reserved_special_token_232|>",
|
| 1925 |
+
"lstrip": false,
|
| 1926 |
+
"normalized": false,
|
| 1927 |
+
"rstrip": false,
|
| 1928 |
+
"single_word": false,
|
| 1929 |
+
"special": true
|
| 1930 |
+
},
|
| 1931 |
+
"128241": {
|
| 1932 |
+
"content": "<|reserved_special_token_233|>",
|
| 1933 |
+
"lstrip": false,
|
| 1934 |
+
"normalized": false,
|
| 1935 |
+
"rstrip": false,
|
| 1936 |
+
"single_word": false,
|
| 1937 |
+
"special": true
|
| 1938 |
+
},
|
| 1939 |
+
"128242": {
|
| 1940 |
+
"content": "<|reserved_special_token_234|>",
|
| 1941 |
+
"lstrip": false,
|
| 1942 |
+
"normalized": false,
|
| 1943 |
+
"rstrip": false,
|
| 1944 |
+
"single_word": false,
|
| 1945 |
+
"special": true
|
| 1946 |
+
},
|
| 1947 |
+
"128243": {
|
| 1948 |
+
"content": "<|reserved_special_token_235|>",
|
| 1949 |
+
"lstrip": false,
|
| 1950 |
+
"normalized": false,
|
| 1951 |
+
"rstrip": false,
|
| 1952 |
+
"single_word": false,
|
| 1953 |
+
"special": true
|
| 1954 |
+
},
|
| 1955 |
+
"128244": {
|
| 1956 |
+
"content": "<|reserved_special_token_236|>",
|
| 1957 |
+
"lstrip": false,
|
| 1958 |
+
"normalized": false,
|
| 1959 |
+
"rstrip": false,
|
| 1960 |
+
"single_word": false,
|
| 1961 |
+
"special": true
|
| 1962 |
+
},
|
| 1963 |
+
"128245": {
|
| 1964 |
+
"content": "<|reserved_special_token_237|>",
|
| 1965 |
+
"lstrip": false,
|
| 1966 |
+
"normalized": false,
|
| 1967 |
+
"rstrip": false,
|
| 1968 |
+
"single_word": false,
|
| 1969 |
+
"special": true
|
| 1970 |
+
},
|
| 1971 |
+
"128246": {
|
| 1972 |
+
"content": "<|reserved_special_token_238|>",
|
| 1973 |
+
"lstrip": false,
|
| 1974 |
+
"normalized": false,
|
| 1975 |
+
"rstrip": false,
|
| 1976 |
+
"single_word": false,
|
| 1977 |
+
"special": true
|
| 1978 |
+
},
|
| 1979 |
+
"128247": {
|
| 1980 |
+
"content": "<|reserved_special_token_239|>",
|
| 1981 |
+
"lstrip": false,
|
| 1982 |
+
"normalized": false,
|
| 1983 |
+
"rstrip": false,
|
| 1984 |
+
"single_word": false,
|
| 1985 |
+
"special": true
|
| 1986 |
+
},
|
| 1987 |
+
"128248": {
|
| 1988 |
+
"content": "<|reserved_special_token_240|>",
|
| 1989 |
+
"lstrip": false,
|
| 1990 |
+
"normalized": false,
|
| 1991 |
+
"rstrip": false,
|
| 1992 |
+
"single_word": false,
|
| 1993 |
+
"special": true
|
| 1994 |
+
},
|
| 1995 |
+
"128249": {
|
| 1996 |
+
"content": "<|reserved_special_token_241|>",
|
| 1997 |
+
"lstrip": false,
|
| 1998 |
+
"normalized": false,
|
| 1999 |
+
"rstrip": false,
|
| 2000 |
+
"single_word": false,
|
| 2001 |
+
"special": true
|
| 2002 |
+
},
|
| 2003 |
+
"128250": {
|
| 2004 |
+
"content": "<|reserved_special_token_242|>",
|
| 2005 |
+
"lstrip": false,
|
| 2006 |
+
"normalized": false,
|
| 2007 |
+
"rstrip": false,
|
| 2008 |
+
"single_word": false,
|
| 2009 |
+
"special": true
|
| 2010 |
+
},
|
| 2011 |
+
"128251": {
|
| 2012 |
+
"content": "<|reserved_special_token_243|>",
|
| 2013 |
+
"lstrip": false,
|
| 2014 |
+
"normalized": false,
|
| 2015 |
+
"rstrip": false,
|
| 2016 |
+
"single_word": false,
|
| 2017 |
+
"special": true
|
| 2018 |
+
},
|
| 2019 |
+
"128252": {
|
| 2020 |
+
"content": "<|reserved_special_token_244|>",
|
| 2021 |
+
"lstrip": false,
|
| 2022 |
+
"normalized": false,
|
| 2023 |
+
"rstrip": false,
|
| 2024 |
+
"single_word": false,
|
| 2025 |
+
"special": true
|
| 2026 |
+
},
|
| 2027 |
+
"128253": {
|
| 2028 |
+
"content": "<|reserved_special_token_245|>",
|
| 2029 |
+
"lstrip": false,
|
| 2030 |
+
"normalized": false,
|
| 2031 |
+
"rstrip": false,
|
| 2032 |
+
"single_word": false,
|
| 2033 |
+
"special": true
|
| 2034 |
+
},
|
| 2035 |
+
"128254": {
|
| 2036 |
+
"content": "<|reserved_special_token_246|>",
|
| 2037 |
+
"lstrip": false,
|
| 2038 |
+
"normalized": false,
|
| 2039 |
+
"rstrip": false,
|
| 2040 |
+
"single_word": false,
|
| 2041 |
+
"special": true
|
| 2042 |
+
},
|
| 2043 |
+
"128255": {
|
| 2044 |
+
"content": "<|reserved_special_token_247|>",
|
| 2045 |
+
"lstrip": false,
|
| 2046 |
+
"normalized": false,
|
| 2047 |
+
"rstrip": false,
|
| 2048 |
+
"single_word": false,
|
| 2049 |
+
"special": true
|
| 2050 |
+
}
|
| 2051 |
+
},
|
| 2052 |
+
"bos_token": "<|begin_of_text|>",
|
| 2053 |
+
"clean_up_tokenization_spaces": true,
|
| 2054 |
+
"eos_token": "<|eot_id|>",
|
| 2055 |
+
"extra_special_tokens": {},
|
| 2056 |
+
"model_input_names": [
|
| 2057 |
+
"input_ids",
|
| 2058 |
+
"attention_mask"
|
| 2059 |
+
],
|
| 2060 |
+
"model_max_length": 131072,
|
| 2061 |
+
"pad_token": "<|eot_id|>",
|
| 2062 |
+
"tokenizer_class": "PreTrainedTokenizerFast"
|
| 2063 |
+
}
|
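
Note: the settings above can be exercised directly with the stock `transformers` API. A minimal sketch, assuming the repo has been downloaded locally (the "./" path is a placeholder, not part of the committed files):

    # Sketch: load the tokenizer described by the tokenizer_config.json above.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./")  # placeholder path to this repo

    # The config sets <|eot_id|> as both eos_token and pad_token, so batch
    # padding reuses the end-of-turn token instead of a dedicated pad id.
    assert tok.eos_token == tok.pad_token == "<|eot_id|>"

    # The reserved special tokens are single-id placeholders that can be
    # repurposed later without resizing the vocabulary; per the map above,
    # <|reserved_special_token_247|> should resolve to id 128255.
    print(tok.convert_tokens_to_ids("<|reserved_special_token_247|>"))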
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:785ade819d78869db7f691e74573b3ce4183646d4e45ad2e1d0a940564b6b20f
+size 6289
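
The three lines above are a Git LFS pointer stub, not the file itself; the actual binary is fetched on `git lfs pull`. Assuming the file was written by the `transformers` Trainer (its usual origin), training_args.bin is a pickled TrainingArguments object and could be inspected with a sketch like:

    # Sketch: inspect the pickled TrainingArguments. Only unpickle files from
    # sources you trust; weights_only=False is required on recent PyTorch.
    import torch

    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.per_device_train_batch_size)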