kernelpool committed on
Commit 55b0f5d · verified · 1 Parent(s): 48ab045

Add files using upload-large-folder tool

README.md ADDED
@@ -0,0 +1,40 @@
1
+ ---
2
+ license: other
3
+ license_name: iquestcoder
4
+ license_link: https://huggingface.co/IQuestLab/IQuest-Coder-V1-40B-Instruct/blob/main/LICENSE
5
+ language:
6
+ - en
7
+ library_name: mlx
8
+ pipeline_tag: text-generation
9
+ base_model: IQuestLab/IQuest-Coder-V1-40B-Loop-Instruct
10
+ tags:
11
+ - mlx
12
+ ---
13
+
14
+ # mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit
15
+
16
+ This model [mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit](https://huggingface.co/mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit) was
17
+ converted to MLX format from [IQuestLab/IQuest-Coder-V1-40B-Loop-Instruct](https://huggingface.co/IQuestLab/IQuest-Coder-V1-40B-Loop-Instruct)
18
+ using mlx-lm version **0.30.0**.
19
+
20
+ ## Use with mlx
21
+
22
+ ```bash
23
+ pip install mlx-lm
24
+ ```
25
+
26
+ ```python
27
+ from mlx_lm import load, generate
28
+
29
+ model, tokenizer = load("mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit")
30
+
31
+ prompt = "hello"
32
+
33
+ if tokenizer.chat_template is not None:
34
+ messages = [{"role": "user", "content": prompt}]
35
+ prompt = tokenizer.apply_chat_template(
36
+ messages, add_generation_prompt=True, return_dict=False,
37
+ )
38
+
39
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
40
+ ```
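
*Editor's note:* if longer completions are needed, `generate` also accepts a `max_tokens` argument. A minimal sketch building on the snippet above (the system prompt and the 512-token limit are arbitrary choices, not part of the repo's README):

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit")

messages = [
    {"role": "system", "content": "You are a concise coding assistant."},
    {"role": "user", "content": "Write a Python function that reverses a string."},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

# max_tokens bounds the number of generated tokens (value here is arbitrary).
response = generate(model, tokenizer, prompt=prompt, max_tokens=512, verbose=True)
```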
__init__.py ADDED
@@ -0,0 +1,26 @@
1
+ """IQuestLoopCoder model package."""
2
+
3
+ from .configuration_iquestloopcoder import IQuestLoopCoderConfig
4
+ from .modeling_iquestloopcoder import (
5
+ IQuestLoopCoderPreTrainedModel,
6
+ IQuestLoopCoderModel,
7
+ IQuestLoopCoderForCausalLM,
8
+ IQuestLoopCoderCache,
9
+ )
10
+ from .tokenization_iquestcoder import IQuestCoderTokenizer
11
+
12
+ try:
13
+ from .tokenization_iquestcoder import IQuestCoderTokenizerFast
14
+ except ImportError:
15
+ IQuestCoderTokenizerFast = None
16
+
17
+ __all__ = [
18
+ "IQuestLoopCoderConfig",
19
+ "IQuestLoopCoderPreTrainedModel",
20
+ "IQuestLoopCoderModel",
21
+ "IQuestLoopCoderForCausalLM",
22
+ "IQuestLoopCoderCache",
23
+ "IQuestCoderTokenizer",
24
+ "IQuestCoderTokenizerFast",
25
+ ]
26
+
added_tokens.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "</think>": 75873,
3
+ "</tool_call>": 75877,
4
+ "</tool_response>": 75879,
5
+ "</tools>": 75875,
6
+ "<CLS>": 75858,
7
+ "<EOD>": 75860,
8
+ "<MASK>": 75861,
9
+ "<PAD>": 75862,
10
+ "<SEP>": 75859,
11
+ "<think>": 75872,
12
+ "<tool_call>": 75876,
13
+ "<tool_response>": 75878,
14
+ "<tools>": 75874,
15
+ "<|CLS|>": 75880,
16
+ "<|EOD|>": 75882,
17
+ "<|MASK|>": 75883,
18
+ "<|PAD|>": 75884,
19
+ "<|SEP|>": 75881,
20
+ "<|endoftext|>": 75869,
21
+ "<|file_sep|>": 75871,
22
+ "<|fim_middle|>": 75866,
23
+ "<|fim_pad|>": 75868,
24
+ "<|fim_prefix|>": 75865,
25
+ "<|fim_suffix|>": 75867,
26
+ "<|im_end|>": 75864,
27
+ "<|im_start|>": 75863,
28
+ "<|repo_name|>": 75870
29
+ }
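
*Editor's note:* these ids are the chat/control tokens added on top of the base vocabulary. As a quick sanity check, the mapping can be read back from the tokenizer itself; a sketch, assuming the repo's standard tokenizer files load through `transformers` (ids are the ones listed above):

```python
from transformers import AutoTokenizer

# trust_remote_code is needed if loading routes to the custom tokenizer class.
tok = AutoTokenizer.from_pretrained(
    "mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit",
    trust_remote_code=True,
)

# Ids taken from added_tokens.json above.
assert tok.convert_tokens_to_ids("<|im_start|>") == 75863
assert tok.convert_tokens_to_ids("<|im_end|>") == 75864
assert tok.convert_tokens_to_ids("<think>") == 75872
```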
chat_template.jinja ADDED
@@ -0,0 +1,69 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- else %}
6
+ {{- 'You are LoopCoder, a helpful assistant developed by IQuest.' }}
7
+ {%- endif %}
8
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0].role == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are LoopCoder, a helpful assistant developed by IQuest.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
22
+ {%- for message in messages[::-1] %}
23
+ {%- set index = (messages|length - 1) - loop.index0 %}
24
+ {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
25
+ {%- set ns.multi_step_tool = false %}
26
+ {%- set ns.last_query_index = index %}
27
+ {%- endif %}
28
+ {%- endfor %}
29
+ {%- for message in messages %}
30
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
31
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
32
+ {%- elif message.role == "assistant" %}
33
+ {%- set content = message.content %}
34
+ {{- '<|im_start|>' + message.role + '\n' + content }}
35
+ {%- if message.tool_calls %}
36
+ {%- for tool_call in message.tool_calls %}
37
+ {%- if (loop.first and content) or (not loop.first) %}
38
+ {{- '\n' }}
39
+ {%- endif %}
40
+ {%- if tool_call.function %}
41
+ {%- set tool_call = tool_call.function %}
42
+ {%- endif %}
43
+ {{- '<tool_call>\n{"name": "' }}
44
+ {{- tool_call.name }}
45
+ {{- '", "arguments": ' }}
46
+ {%- if tool_call.arguments is string %}
47
+ {{- tool_call.arguments }}
48
+ {%- else %}
49
+ {{- tool_call.arguments | tojson }}
50
+ {%- endif %}
51
+ {{- '}\n</tool_call>' }}
52
+ {%- endfor %}
53
+ {%- endif %}
54
+ {{- '<|im_end|>\n' }}
55
+ {%- elif message.role == "tool" %}
56
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
57
+ {{- '<|im_start|>user' }}
58
+ {%- endif %}
59
+ {{- '\n<tool_response>\n' }}
60
+ {{- message.content }}
61
+ {{- '\n</tool_response>' }}
62
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
63
+ {{- '<|im_end|>\n' }}
64
+ {%- endif %}
65
+ {%- endif %}
66
+ {%- endfor %}
67
+ {%- if add_generation_prompt %}
68
+ {{- '<|im_start|>assistant\n' }}
69
+ {%- endif %}
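
*Editor's note:* the template is ChatML-style: tool schemas are serialized into a `<tools>` block inside the system turn, assistant tool calls are wrapped in `<tool_call>` tags, and tool results are folded back into user turns as `<tool_response>` blocks. A hedged sketch of rendering a tool-enabled prompt (the `run_tests` schema is invented for illustration):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit",
    trust_remote_code=True,
)

tools = [{
    "type": "function",
    "function": {
        "name": "run_tests",
        "description": "Run the project's test suite.",
        "parameters": {"type": "object", "properties": {}, "required": []},
    },
}]
messages = [{"role": "user", "content": "Run the tests and summarize any failures."}]

# tokenize=False returns the rendered string so the <tools> block is visible.
text = tok.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(text)
```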
config.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "architectures": [
3
+ "IQuestLoopCoderForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.0,
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_iquestloopcoder.IQuestLoopCoderConfig",
9
+ "AutoModel": "modeling_iquestloopcoder.IQuestLoopCoderModel",
10
+ "AutoModelForCausalLM": "modeling_iquestloopcoder.IQuestLoopCoderForCausalLM"
11
+ },
12
+ "eos_token_id": [
13
+ 2,
14
+ 75864,
15
+ 75869
16
+ ],
17
+ "head_dim": 128,
18
+ "hidden_act": "silu",
19
+ "hidden_size": 5120,
20
+ "initializer_range": 0.02,
21
+ "intermediate_size": 27648,
22
+ "loop_num": 2,
23
+ "loop_window_size": 64,
24
+ "max_position_embeddings": 131072,
25
+ "mlp_bias": false,
26
+ "model_type": "iquestloopcoder",
27
+ "num_attention_heads": 40,
28
+ "num_hidden_layers": 80,
29
+ "num_key_value_heads": 8,
30
+ "quantization": {
31
+ "group_size": 64,
32
+ "bits": 4,
33
+ "mode": "affine"
34
+ },
35
+ "quantization_config": {
36
+ "group_size": 64,
37
+ "bits": 4,
38
+ "mode": "affine"
39
+ },
40
+ "rms_norm_eps": 1e-05,
41
+ "rope_theta": 500000,
42
+ "tie_word_embeddings": false,
43
+ "torch_dtype": "bfloat16",
44
+ "transformers_version": "4.40.0",
45
+ "use_cache": true,
46
+ "vocab_size": 76800
47
+ }
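
*Editor's note:* the `quantization` block (4 bits, group size 64, affine mode) roughly accounts for the shard sizes listed further down: with a 16-bit scale and 16-bit bias per 64-weight group, storage works out to about 4.5 bits per weight. A back-of-the-envelope check (the 40B parameter count and the fp16 group metadata are assumptions):

```python
params = 40e9                          # assumed parameter count (from the model name)
bits_per_weight = 4 + (16 + 16) / 64   # 4-bit weights + fp16 scale and bias per group of 64
approx_gb = params * bits_per_weight / 8 / 1e9
print(f"~{approx_gb:.1f} GB")          # ~22.5 GB, in line with the ~22.4 GB of shards below
```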
configuration_iquestloopcoder.py ADDED
@@ -0,0 +1,132 @@
1
+ # Copyright 2024 IQuestLoopCoder Authors
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ """IQuestLoopCoder model configuration"""
6
+
7
+ from transformers.configuration_utils import PretrainedConfig
8
+ from transformers.utils import logging
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ class IQuestLoopCoderConfig(PretrainedConfig):
14
+ r"""
15
+ Configuration class for IQuestLoopCoder model.
16
+
17
+ IQuestLoopCoder extends the standard LLaMA architecture with a loop mechanism:
18
+ - Loop 1: Standard attention, stores K1, V1
19
+ - Loop 2+: Mixed attention with gated combination of global (K1,V1) and local (K2,V2) KV
20
+
21
+ The gate is computed as: gate = sigmoid(W @ Q + bias)
22
+ Mixed output = gate * Attention(Q, K1, V1) + (1 - gate) * SlidingWindowAttention(Q, K2, V2)
23
+
24
+ Args:
25
+ vocab_size (`int`, *optional*, defaults to 76800):
26
+ Vocabulary size of the model.
27
+ hidden_size (`int`, *optional*, defaults to 5120):
28
+ Dimension of the hidden representations.
29
+ intermediate_size (`int`, *optional*, defaults to 27648):
30
+ Dimension of the MLP representations (FFN hidden size).
31
+ num_hidden_layers (`int`, *optional*, defaults to 80):
32
+ Number of hidden layers in the Transformer decoder.
33
+ num_attention_heads (`int`, *optional*, defaults to 40):
34
+ Number of attention heads for each attention layer.
35
+ num_key_value_heads (`int`, *optional*, defaults to 8):
36
+ Number of key-value heads (for GQA). If None, defaults to num_attention_heads.
37
+ head_dim (`int`, *optional*, defaults to 128):
38
+ Dimension of each attention head (hidden_size // num_attention_heads).
39
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
40
+ Activation function in the MLP.
41
+ max_position_embeddings (`int`, *optional*, defaults to 8192):
42
+ Maximum sequence length.
43
+ initializer_range (`float`, *optional*, defaults to 0.02):
44
+ Standard deviation for weight initialization.
45
+ rms_norm_eps (`float`, *optional*, defaults to 1e-5):
46
+ Epsilon for RMS normalization layers.
47
+ use_cache (`bool`, *optional*, defaults to `True`):
48
+ Whether to use past key/values for generation.
49
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
50
+ Whether to tie input and output embeddings.
51
+ rope_theta (`float`, *optional*, defaults to 500000.0):
52
+ Base value for rotary position embeddings.
53
+ attention_bias (`bool`, *optional*, defaults to `False`):
54
+ Whether to use bias in attention layers.
55
+ attention_dropout (`float`, *optional*, defaults to 0.0):
56
+ Dropout ratio for attention weights.
57
+ mlp_bias (`bool`, *optional*, defaults to `False`):
58
+ Whether to use bias in MLP layers.
59
+
60
+ # Loop-specific parameters
61
+ loop_num (`int`, *optional*, defaults to 2):
62
+ Number of loops through the decoder.
63
+ loop_window_size (`int`, *optional*, defaults to 64):
64
+ Window size for sliding window attention in Loop 2+.
65
+ """
66
+
67
+ model_type = "iquestloopcoder"
68
+ keys_to_ignore_at_inference = ["past_key_values"]
69
+
70
+ def __init__(
71
+ self,
72
+ vocab_size=76800,
73
+ hidden_size=5120,
74
+ intermediate_size=27648,
75
+ num_hidden_layers=80,
76
+ num_attention_heads=40,
77
+ num_key_value_heads=8,
78
+ head_dim=128,
79
+ hidden_act="silu",
80
+ max_position_embeddings=8192,
81
+ initializer_range=0.02,
82
+ rms_norm_eps=1e-5,
83
+ use_cache=True,
84
+ pad_token_id=None,
85
+ bos_token_id=1,
86
+ eos_token_id=2,
87
+ tie_word_embeddings=False,
88
+ rope_theta=500000.0,
89
+ rope_scaling=None,
90
+ attention_bias=False,
91
+ attention_dropout=0.0,
92
+ mlp_bias=False,
93
+ # Loop-specific parameters
94
+ loop_num=2,
95
+ loop_window_size=64,
96
+ **kwargs,
97
+ ):
98
+ self.vocab_size = vocab_size
99
+ self.max_position_embeddings = max_position_embeddings
100
+ self.hidden_size = hidden_size
101
+ self.intermediate_size = intermediate_size
102
+ self.num_hidden_layers = num_hidden_layers
103
+ self.num_attention_heads = num_attention_heads
104
+ self.head_dim = head_dim
105
+
106
+ # GQA support
107
+ if num_key_value_heads is None:
108
+ num_key_value_heads = num_attention_heads
109
+ self.num_key_value_heads = num_key_value_heads
110
+
111
+ self.hidden_act = hidden_act
112
+ self.initializer_range = initializer_range
113
+ self.rms_norm_eps = rms_norm_eps
114
+ self.use_cache = use_cache
115
+ self.rope_theta = rope_theta
116
+ self.rope_scaling = rope_scaling
117
+ self.attention_bias = attention_bias
118
+ self.attention_dropout = attention_dropout
119
+ self.mlp_bias = mlp_bias
120
+
121
+ # Loop-specific
122
+ self.loop_num = loop_num
123
+ self.loop_window_size = loop_window_size
124
+
125
+ super().__init__(
126
+ pad_token_id=pad_token_id,
127
+ bos_token_id=bos_token_id,
128
+ eos_token_id=eos_token_id,
129
+ tie_word_embeddings=tie_word_embeddings,
130
+ **kwargs,
131
+ )
132
+
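
*Editor's note:* because `config.json` maps `AutoConfig` to this class via `auto_map`, the loop-specific fields can be inspected without instantiating the model; a minimal sketch (requires `trust_remote_code=True`):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "mlx-community/IQuest-Coder-V1-40B-Loop-Instruct-4bit",
    trust_remote_code=True,
)
print(config.loop_num, config.loop_window_size)  # 2, 64 per config.json above
```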
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": [2, 75864, 75869],
5
+ "transformers_version": "4.55.4"
6
+ }
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:422be93ba0031dbbd6fc653d6180d295040a3a332a3fe28805d04bbe41c56d6f
3
+ size 5353071049
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8570f91ce8bb673ab6a45128e5c1b048f4e7a752a471026511c773699709cd95
3
+ size 5326572362
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80ec9ac08143fe1ea69ee6148c1a37f33da70a48ebf5c1cd47c7b08c9c612952
3
+ size 5366363667
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a147eaaed2ae879af542cced63bd10234c8a9eb98fe3eda9093c0ac303ef190
3
+ size 5330996146
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d18865438e6577a139528504406926fb2d34e6f70141a45bf3f34dfd0ae5c897
3
+ size 1009519538
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_iquestloopcoder.py ADDED
@@ -0,0 +1,1421 @@
1
+ """
2
+ Modified MIT License
3
+
4
+ Software Copyright© 2025 IQuest Research
5
+
6
+ Our only modification is that, if the Software (or any derivative works
7
+ thereof) is used for any of your commercial products or services, you shall
8
+ prominently display "IQuest Coder" on the user interface of such product or
9
+ service.
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
+ """
27
+
28
+ import math
29
+ from typing import Any, List, Optional, Tuple, Union
30
+
31
+ import torch
32
+ import torch.nn.functional as F
33
+ import torch.utils.checkpoint
34
+ from torch import nn
35
+
36
+ from transformers.activations import ACT2FN
37
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
38
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
39
+ from transformers.modeling_outputs import (
40
+ BaseModelOutputWithPast,
41
+ CausalLMOutputWithPast,
42
+ )
43
+ from transformers.modeling_utils import PreTrainedModel
44
+ from transformers.generation.utils import GenerationMixin
45
+ from transformers.utils import (
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+
52
+ from .configuration_iquestloopcoder import IQuestLoopCoderConfig
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CONFIG_FOR_DOC = "IQuestLoopCoderConfig"
57
+
58
+
59
+ class IQuestLoopCoderCache(Cache):
60
+ """Cache implementation for IQuestLoopCoder that manages shared and local KV caches.
61
+
62
+ - shared_key_cache/shared_value_cache: Stores KV from Loop 1 (global context)
63
+ - local_key_cache/local_value_cache: Stores KV from Loop 2+ (local window, only window_size tokens)
64
+ """
65
+
66
+ def __init__(self, window_size: int, num_layers: int):
67
+ # We intentionally don't call super().__init__ because the parent assumes static cache sizes.
68
+ self.window_size = window_size
69
+ self.num_layers = num_layers
70
+
71
+ # Shared cache: stores Loop 1 KV (global context)
72
+ self.shared_key_cache: List[Optional[torch.Tensor]] = [None] * num_layers
73
+ self.shared_value_cache: List[Optional[torch.Tensor]] = [None] * num_layers
74
+
75
+ # Local cache: stores Loop 2+ KV (sliding window, only window_size tokens)
76
+ self.local_key_cache: List[Optional[torch.Tensor]] = [None] * num_layers
77
+ self.local_value_cache: List[Optional[torch.Tensor]] = [None] * num_layers
78
+
79
+ self.layers: List[Any] = [] # attribute expected by HF Cache utilities
80
+ self._seen_tokens = 0
81
+
82
+ def update_shared(
83
+ self,
84
+ key_states: torch.Tensor,
85
+ value_states: torch.Tensor,
86
+ layer_idx: int,
87
+ cache_kwargs: Optional[dict] = None,
88
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
89
+ """Update shared cache (Loop 1 KV)."""
90
+ if layer_idx < 0 or layer_idx >= self.num_layers:
91
+ raise ValueError(f"layer_idx must be in [0, {self.num_layers}), got {layer_idx}")
92
+
93
+ cached_key = self.shared_key_cache[layer_idx]
94
+ cached_value = self.shared_value_cache[layer_idx]
95
+
96
+ if cached_key is None:
97
+ self.shared_key_cache[layer_idx] = key_states
98
+ self.shared_value_cache[layer_idx] = value_states
99
+ else:
100
+ if (
101
+ key_states.shape[0] != cached_key.shape[0]
102
+ or key_states.shape[1] != cached_key.shape[1]
103
+ or key_states.shape[3] != cached_key.shape[3]
104
+ ):
105
+ raise ValueError(
106
+ "Cached and incoming key/value tensors must match on batch, head, and head_dim dimensions."
107
+ )
108
+ assert cached_value is not None
109
+ self.shared_key_cache[layer_idx] = torch.cat([cached_key, key_states], dim=2)
110
+ self.shared_value_cache[layer_idx] = torch.cat([cached_value, value_states], dim=2)
111
+
112
+ result_key = self.shared_key_cache[layer_idx]
113
+ result_value = self.shared_value_cache[layer_idx]
114
+ assert result_key is not None and result_value is not None
115
+
116
+ # Track sequence length
117
+ self._seen_tokens = result_key.shape[2]
118
+ return result_key, result_value
119
+
120
+ def update_local(
121
+ self,
122
+ key_states: torch.Tensor,
123
+ value_states: torch.Tensor,
124
+ layer_idx: int,
125
+ cache_kwargs: Optional[dict] = None,
126
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
127
+ """Update local cache (Loop 2+ KV) with sliding window management.
128
+
129
+ If the cache is full (window_size tokens), remove the oldest token and add the new one.
130
+ """
131
+ if layer_idx < 0 or layer_idx >= self.num_layers:
132
+ raise ValueError(f"layer_idx must be in [0, {self.num_layers}), got {layer_idx}")
133
+
134
+ cached_key = self.local_key_cache[layer_idx]
135
+ cached_value = self.local_value_cache[layer_idx]
136
+
137
+ if cached_key is None:
138
+ # First token in local cache
139
+ self.local_key_cache[layer_idx] = key_states
140
+ self.local_value_cache[layer_idx] = value_states
141
+ else:
142
+ if (
143
+ key_states.shape[0] != cached_key.shape[0]
144
+ or key_states.shape[1] != cached_key.shape[1]
145
+ or key_states.shape[3] != cached_key.shape[3]
146
+ ):
147
+ raise ValueError(
148
+ "Cached and incoming key/value tensors must match on batch, head, and head_dim dimensions."
149
+ )
150
+ assert cached_value is not None
151
+
152
+ # Check if we need to remove the oldest token
153
+ current_len = cached_key.shape[2]
154
+ if current_len >= self.window_size:
155
+ # Remove the first token (oldest) and add the new one
156
+ self.local_key_cache[layer_idx] = torch.cat([cached_key[:, :, 1:, :], key_states], dim=2)
157
+ self.local_value_cache[layer_idx] = torch.cat([cached_value[:, :, 1:, :], value_states], dim=2)
158
+ else:
159
+ # Just append
160
+ self.local_key_cache[layer_idx] = torch.cat([cached_key, key_states], dim=2)
161
+ self.local_value_cache[layer_idx] = torch.cat([cached_value, value_states], dim=2)
162
+
163
+ result_key = self.local_key_cache[layer_idx]
164
+ result_value = self.local_value_cache[layer_idx]
165
+ assert result_key is not None and result_value is not None
166
+
167
+ return result_key, result_value
168
+
169
+ def get_shared(self, layer_idx: int) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
170
+ """Get shared cache for a layer."""
171
+ if layer_idx < 0 or layer_idx >= self.num_layers:
172
+ return None, None
173
+ return self.shared_key_cache[layer_idx], self.shared_value_cache[layer_idx]
174
+
175
+ def get_local(self, layer_idx: int) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]:
176
+ """Get local cache for a layer."""
177
+ if layer_idx < 0 or layer_idx >= self.num_layers:
178
+ return None, None
179
+ return self.local_key_cache[layer_idx], self.local_value_cache[layer_idx]
180
+
181
+ def update(
182
+ self,
183
+ key_states: torch.Tensor,
184
+ value_states: torch.Tensor,
185
+ layer_idx: int,
186
+ cache_kwargs: Optional[dict] = None,
187
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
188
+ """Default update method (for compatibility, updates shared cache)."""
189
+ return self.update_shared(key_states, value_states, layer_idx, cache_kwargs)
190
+
191
+ def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
192
+ """Get sequence length from shared cache."""
193
+ if layer_idx is None:
194
+ layer_idx = 0
195
+ if layer_idx < 0 or layer_idx >= len(self.shared_key_cache):
196
+ return 0
197
+ cached = self.shared_key_cache[layer_idx]
198
+ if cached is None:
199
+ return 0
200
+ return cached.shape[2]
201
+
202
+ def get_max_length(self) -> Optional[int]:
203
+ return None
204
+
205
+ def get_usable_length(
206
+ self, new_seq_length: int, layer_idx: Optional[int] = 0
207
+ ) -> int:
208
+ return self.get_seq_length(layer_idx)
209
+
210
+ def reorder_cache(self, beam_idx: torch.LongTensor) -> None:
211
+ """Reorder cache for beam search."""
212
+ for layer_idx in range(self.num_layers):
213
+ if self.shared_key_cache[layer_idx] is not None:
214
+ device = self.shared_key_cache[layer_idx].device
215
+ self.shared_key_cache[layer_idx] = self.shared_key_cache[layer_idx].index_select(0, beam_idx.to(device))
216
+ self.shared_value_cache[layer_idx] = self.shared_value_cache[layer_idx].index_select(0, beam_idx.to(device))
217
+
218
+ if self.local_key_cache[layer_idx] is not None:
219
+ device = self.local_key_cache[layer_idx].device
220
+ self.local_key_cache[layer_idx] = self.local_key_cache[layer_idx].index_select(0, beam_idx.to(device))
221
+ self.local_value_cache[layer_idx] = self.local_value_cache[layer_idx].index_select(0, beam_idx.to(device))
222
+
223
+ @property
224
+ def is_compileable(self) -> bool:
225
+ return False
226
+
227
+ def clear(self) -> None:
228
+ """Clear all caches."""
229
+ logger.debug("Clearing IQuestLoopCoderCache")
230
+ self.shared_key_cache = [None] * self.num_layers
231
+ self.shared_value_cache = [None] * self.num_layers
232
+ self.local_key_cache = [None] * self.num_layers
233
+ self.local_value_cache = [None] * self.num_layers
234
+ self._seen_tokens = 0
235
+
236
+
237
+ class IQuestLoopCoderRMSNorm(nn.Module):
238
+ """RMS Normalization layer."""
239
+
240
+ def __init__(self, hidden_size, eps=1e-6):
241
+ super().__init__()
242
+ self.weight = nn.Parameter(torch.ones(hidden_size))
243
+ self.variance_epsilon = eps
244
+
245
+ def forward(self, hidden_states):
246
+ input_dtype = hidden_states.dtype
247
+ hidden_states = hidden_states.to(torch.float32)
248
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
249
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
250
+ return self.weight * hidden_states.to(input_dtype)
251
+
252
+
253
+ class IQuestLoopCoderRotaryEmbedding(nn.Module):
254
+ """Rotary Position Embedding (RoPE)."""
255
+
256
+ def __init__(self, dim, max_position_embeddings=8192, base=500000.0, device=None, scaling_factor=1.0):
257
+ super().__init__()
258
+ self.scaling_factor = scaling_factor
259
+ self.dim = dim
260
+ self.max_position_embeddings = max_position_embeddings
261
+ self.base = base
262
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
263
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
264
+ self.max_seq_len_cached = max_position_embeddings
265
+
266
+ @torch.no_grad()
267
+ def forward(self, x, position_ids):
268
+ # x: [batch_size, num_heads, seq_len, head_dim]
269
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
270
+ position_ids_expanded = position_ids[:, None, :].float()
271
+
272
+ device_type = x.device.type
273
+ with torch.autocast(device_type=device_type, enabled=False):
274
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
275
+ emb = torch.cat((freqs, freqs), dim=-1)
276
+ cos = emb.cos()
277
+ sin = emb.sin()
278
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
279
+
280
+
281
+ def rotate_half(x):
282
+ """Rotates half the hidden dims of the input."""
283
+ x1 = x[..., : x.shape[-1] // 2]
284
+ x2 = x[..., x.shape[-1] // 2 :]
285
+ return torch.cat((-x2, x1), dim=-1)
286
+
287
+
288
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
289
+ """Applies Rotary Position Embedding to the query and key tensors."""
290
+ cos = cos.unsqueeze(unsqueeze_dim)
291
+ sin = sin.unsqueeze(unsqueeze_dim)
292
+ q_embed = (q * cos) + (rotate_half(q) * sin)
293
+ k_embed = (k * cos) + (rotate_half(k) * sin)
294
+ return q_embed, k_embed
295
+
296
+
297
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
298
+ """Expand KV heads to match query heads for GQA."""
299
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
300
+ if n_rep == 1:
301
+ return hidden_states
302
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
303
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
304
+
305
+
306
+ class IQuestLoopCoderMLP(nn.Module):
307
+ """MLP with SwiGLU activation."""
308
+
309
+ def __init__(self, config):
310
+ super().__init__()
311
+ self.config = config
312
+ self.hidden_size = config.hidden_size
313
+ self.intermediate_size = config.intermediate_size
314
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
315
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
316
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
317
+ self.act_fn = ACT2FN[config.hidden_act]
318
+
319
+ def forward(self, x):
320
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
321
+
322
+
323
+ class LoopGateProjection(nn.Module):
324
+ """Gate projection for mixed attention in Loop 2+.
325
+
326
+ Computes: g = sigmoid(linear(Q)) for each head independently.
327
+ This gate determines how much to use Loop1's KV (global) vs current loop's KV (local).
328
+ """
329
+
330
+ def __init__(self, num_heads: int, head_dim: int):
331
+ super().__init__()
332
+ self.num_heads = num_heads
333
+ self.head_dim = head_dim
334
+ # Each head has its own gate: Linear(head_dim -> 1) per head
335
+ # Implemented as [num_heads, head_dim] weight + [num_heads] bias
336
+ self.weight = nn.Parameter(torch.zeros(num_heads, head_dim))
337
+ self.bias = nn.Parameter(torch.zeros(num_heads))
338
+
339
+ def forward(self, query: torch.Tensor) -> torch.Tensor:
340
+ """Compute gate values from query tensor.
341
+
342
+ Args:
343
+ query: [batch, num_heads, seq_len, head_dim]
344
+
345
+ Returns:
346
+ gate: [batch, num_heads, seq_len, 1]
347
+ """
348
+ # query: [batch, num_heads, seq_len, head_dim]
349
+ # weight: [num_heads, head_dim]
350
+ # For each head h: gate_h = query[:, h, :, :] @ weight[h, :].T + bias[h]
351
+ # Using einsum: gate = einsum('bhsd,hd->bhs', query, weight) + bias
352
+ gate_logits = torch.einsum('bhsd,hd->bhs', query, self.weight) # [batch, num_heads, seq_len]
353
+ gate_logits = gate_logits + self.bias[None, :, None] # broadcast bias
354
+ gate = torch.sigmoid(gate_logits)
355
+ return gate.unsqueeze(-1) # [batch, num_heads, seq_len, 1]
356
+
357
+
358
+ class IQuestLoopCoderAttention(nn.Module):
359
+ """Multi-head attention with GQA support."""
360
+
361
+ def __init__(self, config: IQuestLoopCoderConfig, layer_idx: Optional[int] = None):
362
+ super().__init__()
363
+ self.config = config
364
+ self.layer_idx = layer_idx
365
+
366
+ self.hidden_size = config.hidden_size
367
+ self.num_heads = config.num_attention_heads
368
+ self.head_dim = config.head_dim
369
+ self.num_key_value_heads = config.num_key_value_heads
370
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
371
+ self.max_position_embeddings = config.max_position_embeddings
372
+ self.rope_theta = config.rope_theta
373
+ self.attention_dropout = config.attention_dropout
374
+
375
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
376
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
377
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
378
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
379
+
380
+ self.rotary_emb = IQuestLoopCoderRotaryEmbedding(
381
+ self.head_dim,
382
+ max_position_embeddings=self.max_position_embeddings,
383
+ base=self.rope_theta,
384
+ )
385
+
386
+ def forward(
387
+ self,
388
+ hidden_states: torch.Tensor,
389
+ attention_mask: Optional[torch.Tensor] = None,
390
+ position_ids: Optional[torch.LongTensor] = None,
391
+ past_key_value: Optional[Cache] = None,
392
+ output_attentions: bool = False,
393
+ use_cache: bool = False,
394
+ cache_position: Optional[torch.LongTensor] = None,
395
+ **kwargs,
396
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
397
+ bsz, q_len, _ = hidden_states.size()
398
+
399
+ query_states = self.q_proj(hidden_states)
400
+ key_states = self.k_proj(hidden_states)
401
+ value_states = self.v_proj(hidden_states)
402
+
403
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
404
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
405
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
406
+
407
+ cos, sin = self.rotary_emb(value_states, position_ids)
408
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
409
+
410
+ if past_key_value is not None:
411
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
412
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
413
+
414
+ # Repeat KV for GQA
415
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
416
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
417
+
418
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
419
+
420
+ if attention_mask is not None:
421
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
422
+ attn_weights = attn_weights + causal_mask
423
+
424
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
425
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
426
+ attn_output = torch.matmul(attn_weights, value_states)
427
+
428
+ attn_output = attn_output.transpose(1, 2).contiguous()
429
+ attn_output = attn_output.reshape(bsz, q_len, -1)
430
+ attn_output = self.o_proj(attn_output)
431
+
432
+ return attn_output, attn_weights if output_attentions else None, past_key_value
433
+
434
+ def forward_with_external_kv(
435
+ self,
436
+ hidden_states: torch.Tensor,
437
+ external_key: torch.Tensor,
438
+ external_value: torch.Tensor,
439
+ attention_mask: Optional[torch.Tensor] = None,
440
+ position_ids: Optional[torch.LongTensor] = None,
441
+ sliding_window: Optional[int] = None,
442
+ ) -> torch.Tensor:
443
+ """Forward pass using external K, V (for Loop 2+ mixed attention).
444
+
445
+ Args:
446
+ hidden_states: Input for computing Q
447
+ external_key: Pre-computed K (already with RoPE applied)
448
+ external_value: Pre-computed V
449
+ attention_mask: Causal attention mask
450
+ position_ids: Position IDs
451
+ sliding_window: If set, apply sliding window attention
452
+
453
+ Returns:
454
+ Attention output [batch, seq_len, num_heads, head_dim]
455
+ """
456
+ bsz, q_len, _ = hidden_states.size()
457
+
458
+ # Compute Q from current hidden states
459
+ query_states = self.q_proj(hidden_states)
460
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
461
+
462
+ # Apply RoPE to Q
463
+ cos, sin = self.rotary_emb(query_states, position_ids)
464
+ query_states = (query_states * cos.unsqueeze(1)) + (rotate_half(query_states) * sin.unsqueeze(1))
465
+
466
+ # Use external K, V (already have RoPE for K)
467
+ key_states = external_key
468
+ value_states = external_value
469
+
470
+ # Repeat KV for GQA
471
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
472
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
473
+
474
+ # Compute attention
475
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
476
+
477
+ # Apply attention mask (causal)
478
+ if attention_mask is not None:
479
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
480
+ attn_weights = attn_weights + causal_mask
481
+
482
+ # Apply sliding window mask if needed
483
+ if sliding_window is not None and q_len > sliding_window:
484
+ # Create sliding window mask
485
+ # For each position i, can only attend to [i-window+1, i]
486
+ seq_len = key_states.shape[2]
487
+ row_idx = torch.arange(q_len, device=query_states.device).unsqueeze(1)
488
+ col_idx = torch.arange(seq_len, device=query_states.device).unsqueeze(0)
489
+ window_mask = (col_idx > row_idx) | (col_idx < row_idx - sliding_window + 1)
490
+ window_mask = window_mask.unsqueeze(0).unsqueeze(0) # [1, 1, q_len, seq_len]
491
+ attn_weights = attn_weights.masked_fill(window_mask, float('-inf'))
492
+
493
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
494
+ attn_output = torch.matmul(attn_weights, value_states)
495
+
496
+ # Don't apply o_proj here - return raw attention output
497
+ attn_output = attn_output.transpose(1, 2).contiguous()
498
+ return attn_output # [batch, seq_len, num_heads, head_dim]
499
+
500
+ def get_qkv(
501
+ self,
502
+ hidden_states: torch.Tensor,
503
+ position_ids: Optional[torch.LongTensor] = None,
504
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
505
+ """Get Q, K, V tensors with RoPE applied.
506
+
507
+ Returns:
508
+ query: [batch, num_heads, seq_len, head_dim]
509
+ key: [batch, num_kv_heads, seq_len, head_dim]
510
+ value: [batch, num_kv_heads, seq_len, head_dim]
511
+ """
512
+ bsz, q_len, _ = hidden_states.size()
513
+
514
+ query_states = self.q_proj(hidden_states)
515
+ key_states = self.k_proj(hidden_states)
516
+ value_states = self.v_proj(hidden_states)
517
+
518
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
519
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
520
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
521
+
522
+ cos, sin = self.rotary_emb(value_states, position_ids)
523
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
524
+
525
+ return query_states, key_states, value_states
526
+
527
+ def forward_decode_loop1(
528
+ self,
529
+ hidden_states: torch.Tensor,
530
+ past_shared_key: Optional[torch.Tensor],
531
+ past_shared_value: Optional[torch.Tensor],
532
+ attention_mask: Optional[torch.Tensor] = None,
533
+ position_ids: Optional[torch.LongTensor] = None,
534
+ cache_position: Optional[torch.LongTensor] = None,
535
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
536
+ """Forward pass for Loop 1 in decode stage.
537
+
538
+ Args:
539
+ hidden_states: Current hidden states [batch, 1, hidden_size]
540
+ past_shared_key: Past shared keys from cache [batch, num_kv_heads, past_len, head_dim]
541
+ past_shared_value: Past shared values from cache [batch, num_kv_heads, past_len, head_dim]
542
+ attention_mask: Causal attention mask
543
+ position_ids: Position IDs
544
+ cache_position: Cache position
545
+
546
+ Returns:
547
+ output: Attention output [batch, 1, hidden_size]
548
+ k1: Current key [batch, num_kv_heads, 1, head_dim] (only current token)
549
+ v1: Current value [batch, num_kv_heads, 1, head_dim] (only current token)
550
+ """
551
+ bsz, q_len, _ = hidden_states.size()
552
+
553
+ query_states = self.q_proj(hidden_states)
554
+ key_states = self.k_proj(hidden_states)
555
+ value_states = self.v_proj(hidden_states)
556
+
557
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
558
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
559
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
560
+
561
+ cos, sin = self.rotary_emb(value_states, position_ids)
562
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
563
+
564
+ # Store current token's k1, v1 for return (before concatenation)
565
+ k1_current = key_states # [batch, num_kv_heads, 1, head_dim]
566
+ v1_current = value_states # [batch, num_kv_heads, 1, head_dim]
567
+
568
+ # Concatenate with past shared KV cache for attention computation
569
+ if past_shared_key is not None and past_shared_value is not None:
570
+ key_states = torch.cat([past_shared_key, key_states], dim=2)
571
+ value_states = torch.cat([past_shared_value, value_states], dim=2)
572
+
573
+ # Repeat KV for GQA
574
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
575
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
576
+
577
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
578
+
579
+ if attention_mask is not None:
580
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
581
+ attn_weights = attn_weights + causal_mask
582
+
583
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
584
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
585
+ attn_output = torch.matmul(attn_weights, value_states)
586
+
587
+ attn_output = attn_output.transpose(1, 2).contiguous()
588
+ attn_output = attn_output.reshape(bsz, q_len, -1)
589
+ attn_output = self.o_proj(attn_output)
590
+
591
+ return attn_output, k1_current, v1_current
592
+
593
+ def forward_decode_loop2(
594
+ self,
595
+ hidden_states: torch.Tensor,
596
+ k1: torch.Tensor,
597
+ v1: torch.Tensor,
598
+ past_shared_key: Optional[torch.Tensor],
599
+ past_shared_value: Optional[torch.Tensor],
600
+ past_local_key: Optional[torch.Tensor],
601
+ past_local_value: Optional[torch.Tensor],
602
+ gate_proj: LoopGateProjection,
603
+ attention_mask: Optional[torch.Tensor] = None,
604
+ position_ids: Optional[torch.LongTensor] = None,
605
+ loop_window_size: int = 64,
606
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
607
+ """Forward pass for Loop 2 in decode stage with mixed attention.
608
+
609
+ Args:
610
+ hidden_states: Current hidden states [batch, 1, hidden_size]
611
+ k1: Key from Loop 1 (current token) [batch, num_kv_heads, 1, head_dim]
612
+ v1: Value from Loop 1 (current token) [batch, num_kv_heads, 1, head_dim]
613
+ past_shared_key: Past shared keys from cache [batch, num_kv_heads, past_len, head_dim]
614
+ past_shared_value: Past shared values from cache [batch, num_kv_heads, past_len, head_dim]
615
+ past_local_key: Past local keys from cache [batch, num_kv_heads, window_len, head_dim]
616
+ past_local_value: Past local values from cache [batch, num_kv_heads, window_len, head_dim]
617
+ gate_proj: Gate projection module
618
+ attention_mask: Causal attention mask
619
+ position_ids: Position IDs
620
+ loop_window_size: Window size for sliding window attention
621
+
622
+ Returns:
623
+ output: Attention output [batch, 1, hidden_size]
624
+ k2: Current key [batch, num_kv_heads, 1, head_dim]
625
+ v2: Current value [batch, num_kv_heads, 1, head_dim]
626
+ """
627
+ bsz, q_len, _ = hidden_states.size()
628
+
629
+ # Get Q2, K2, V2 for current loop
630
+ q2, k2, v2 = self.get_qkv(hidden_states, position_ids)
631
+
632
+ # Compute gate: g = sigmoid(linear(Q2))
633
+ gate = gate_proj(q2) # [batch, num_heads, 1, 1]
634
+
635
+ # For attention A: concatenate past shared KV with current k1, v1 (full global context)
636
+ if past_shared_key is not None and past_shared_value is not None:
637
+ k1_full = torch.cat([past_shared_key, k1], dim=2)
638
+ v1_full = torch.cat([past_shared_value, v1], dim=2)
639
+ else:
640
+ k1_full = k1
641
+ v1_full = v1
642
+
643
+ # For attention B: concatenate past local KV with current k2, v2 (sliding window)
644
+ if past_local_key is not None and past_local_value is not None:
645
+ k2_full = torch.cat([past_local_key, k2], dim=2)
646
+ v2_full = torch.cat([past_local_value, v2], dim=2)
647
+ else:
648
+ k2_full = k2
649
+ v2_full = v2
650
+
651
+ # Repeat KV for GQA
652
+ k1_expanded = repeat_kv(k1_full, self.num_key_value_groups)
653
+ v1_expanded = repeat_kv(v1_full, self.num_key_value_groups)
654
+ k2_expanded = repeat_kv(k2_full, self.num_key_value_groups)
655
+ v2_expanded = repeat_kv(v2_full, self.num_key_value_groups)
656
+
657
+ # Attention A: Q2 @ K1_full, V1_full (global, full sequence)
658
+ head_dim = q2.shape[-1]
659
+ attn_weights_A = torch.matmul(q2, k1_expanded.transpose(2, 3)) / math.sqrt(head_dim)
660
+ if attention_mask is not None:
661
+ causal_mask = attention_mask[:, :, :, : k1_expanded.shape[-2]]
662
+ attn_weights_A = attn_weights_A + causal_mask
663
+ attn_weights_A = nn.functional.softmax(attn_weights_A, dim=-1, dtype=torch.float32).to(q2.dtype)
664
+ attn_A = torch.matmul(attn_weights_A, v1_expanded)
665
+
666
+ # Attention B: Q2 @ K2_full, V2_full (local sliding window)
667
+ attn_weights_B = torch.matmul(q2, k2_expanded.transpose(2, 3)) / math.sqrt(head_dim)
668
+ if attention_mask is not None:
669
+ causal_mask = attention_mask[:, :, :, : k2_expanded.shape[-2]]
670
+ attn_weights_B = attn_weights_B + causal_mask
671
+
672
+ # Apply sliding window mask
673
+ q_len_attn = q2.shape[2]
674
+ k_len_attn = k2_expanded.shape[2]
675
+ if q_len_attn <= loop_window_size:
676
+ # If sequence fits in window, use standard attention
677
+ attn_weights_B = nn.functional.softmax(attn_weights_B, dim=-1, dtype=torch.float32).to(q2.dtype)
678
+ else:
679
+ # Apply sliding window mask
680
+ row_idx = torch.arange(q_len_attn, device=q2.device).unsqueeze(1)
681
+ col_idx = torch.arange(k_len_attn, device=q2.device).unsqueeze(0)
682
+ window_mask = (col_idx > row_idx) | (col_idx < row_idx - loop_window_size + 1)
683
+ window_mask = window_mask.unsqueeze(0).unsqueeze(0)
684
+ attn_weights_B = attn_weights_B.masked_fill(window_mask, float('-inf'))
685
+ attn_weights_B = nn.functional.softmax(attn_weights_B, dim=-1, dtype=torch.float32).to(q2.dtype)
686
+ attn_B = torch.matmul(attn_weights_B, v2_expanded)
687
+
688
+ # Mixed attention: gate * A + (1 - gate) * B
689
+ mixed_attn = gate * attn_A + (1 - gate) * attn_B
690
+
691
+ # Reshape and apply output projection
692
+ bsz, num_heads, seq_len, head_dim = mixed_attn.shape
693
+ mixed_attn = mixed_attn.transpose(1, 2).contiguous().reshape(bsz, seq_len, -1)
694
+ attn_output = self.o_proj(mixed_attn)
695
+
696
+ return attn_output, k2, v2
697
+
698
+
699
+ class IQuestLoopCoderDecoderLayer(nn.Module):
700
+ """Transformer decoder layer."""
701
+
702
+ def __init__(self, config: IQuestLoopCoderConfig, layer_idx: int):
703
+ super().__init__()
704
+ self.hidden_size = config.hidden_size
705
+ self.self_attn = IQuestLoopCoderAttention(config=config, layer_idx=layer_idx)
706
+ self.mlp = IQuestLoopCoderMLP(config)
707
+ self.input_layernorm = IQuestLoopCoderRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
708
+ self.post_attention_layernorm = IQuestLoopCoderRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
709
+
710
+ def forward(
711
+ self,
712
+ hidden_states: torch.Tensor,
713
+ attention_mask: Optional[torch.Tensor] = None,
714
+ position_ids: Optional[torch.LongTensor] = None,
715
+ past_key_value: Optional[Cache] = None,
716
+ output_attentions: Optional[bool] = False,
717
+ use_cache: Optional[bool] = False,
718
+ cache_position: Optional[torch.LongTensor] = None,
719
+ **kwargs,
720
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
721
+ residual = hidden_states
722
+ hidden_states = self.input_layernorm(hidden_states)
723
+
724
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
725
+ hidden_states=hidden_states,
726
+ attention_mask=attention_mask,
727
+ position_ids=position_ids,
728
+ past_key_value=past_key_value,
729
+ output_attentions=output_attentions,
730
+ use_cache=use_cache,
731
+ cache_position=cache_position,
732
+ **kwargs,
733
+ )
734
+ hidden_states = residual + hidden_states
735
+
736
+ residual = hidden_states
737
+ hidden_states = self.post_attention_layernorm(hidden_states)
738
+ hidden_states = self.mlp(hidden_states)
739
+ hidden_states = residual + hidden_states
740
+
741
+ outputs = (hidden_states,)
742
+ if output_attentions:
743
+ outputs += (self_attn_weights,)
744
+ if use_cache:
745
+ outputs += (present_key_value,)
746
+ return outputs
747
+
748
+ def forward_loop2_mixed(
749
+ self,
750
+ hidden_states: torch.Tensor,
751
+ k1: torch.Tensor,
752
+ v1: torch.Tensor,
753
+ gate_proj: LoopGateProjection,
754
+ attention_mask: Optional[torch.Tensor] = None,
755
+ position_ids: Optional[torch.LongTensor] = None,
756
+ loop_window_size: int = 64,
757
+ ) -> Tuple[torch.Tensor, float]:
758
+ """Forward pass for Loop 2+ with mixed attention.
759
+
760
+ Args:
761
+ hidden_states: Current hidden states
762
+ k1: Key from Loop 1 [batch, num_kv_heads, seq_len, head_dim]
763
+ v1: Value from Loop 1 [batch, num_kv_heads, seq_len, head_dim]
764
+ gate_proj: Gate projection module for this layer
765
+ attention_mask: Causal attention mask
766
+ position_ids: Position IDs
767
+ loop_window_size: Window size for sliding window attention
768
+
769
+ Returns:
770
+ output hidden states, gate mean value
771
+ """
772
+ residual = hidden_states
773
+ hidden_states_normed = self.input_layernorm(hidden_states)
774
+
775
+ # Get Q2, K2, V2 for current loop
776
+ q2, k2, v2 = self.self_attn.get_qkv(hidden_states_normed, position_ids)
777
+
778
+ # Compute gate: g = sigmoid(linear(Q2))
779
+ # q2: [batch, num_heads, seq_len, head_dim]
780
+ gate = gate_proj(q2) # [batch, num_heads, seq_len, 1]
781
+ gate_mean = gate.detach().mean().item()
782
+
783
+ # Repeat K1, V1 for GQA
784
+ k1_expanded = repeat_kv(k1, self.self_attn.num_key_value_groups)
785
+ v1_expanded = repeat_kv(v1, self.self_attn.num_key_value_groups)
786
+ k2_expanded = repeat_kv(k2, self.self_attn.num_key_value_groups)
787
+ v2_expanded = repeat_kv(v2, self.self_attn.num_key_value_groups)
788
+
789
+ # Attention A: Q2 @ K1, V1 (global, full sequence)
790
+ attn_A = self._compute_attention(q2, k1_expanded, v1_expanded, attention_mask)
791
+
792
+ # Attention B: Q2 @ K2, V2 (local sliding window)
793
+ attn_B = self._compute_attention_with_window(q2, k2_expanded, v2_expanded, attention_mask, loop_window_size)
794
+
795
+ # Mixed attention: gate * A + (1 - gate) * B
796
+ # attn_A, attn_B: [batch, num_heads, seq_len, head_dim]
797
+ mixed_attn = gate * attn_A + (1 - gate) * attn_B
798
+
799
+ # Reshape and apply output projection
800
+ bsz, num_heads, seq_len, head_dim = mixed_attn.shape
801
+ mixed_attn = mixed_attn.transpose(1, 2).contiguous().reshape(bsz, seq_len, -1)
802
+ hidden_states = self.self_attn.o_proj(mixed_attn)
803
+
804
+ hidden_states = residual + hidden_states
805
+
806
+ # MLP
807
+ residual = hidden_states
808
+ hidden_states = self.post_attention_layernorm(hidden_states)
809
+ hidden_states = self.mlp(hidden_states)
810
+ hidden_states = residual + hidden_states
811
+
812
+ return hidden_states, gate_mean
813
+
814
+ def _compute_attention(
815
+ self,
816
+ query: torch.Tensor,
817
+ key: torch.Tensor,
818
+ value: torch.Tensor,
819
+ attention_mask: Optional[torch.Tensor],
820
+ ) -> torch.Tensor:
821
+ """Standard attention computation."""
822
+ head_dim = query.shape[-1]
823
+ attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)
824
+
825
+ if attention_mask is not None:
826
+ causal_mask = attention_mask[:, :, :, : key.shape[-2]]
827
+ attn_weights = attn_weights + causal_mask
828
+
829
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
830
+ attn_output = torch.matmul(attn_weights, value)
831
+ return attn_output
832
+
833
+ def _compute_attention_with_window(
834
+ self,
835
+ query: torch.Tensor,
836
+ key: torch.Tensor,
837
+ value: torch.Tensor,
838
+ attention_mask: Optional[torch.Tensor],
839
+ window_size: int,
840
+ ) -> torch.Tensor:
841
+ """Attention with sliding window."""
842
+ q_len = query.shape[2]
843
+ k_len = key.shape[2]
844
+ head_dim = query.shape[-1]
845
+
846
+ # If sequence fits in window, use standard attention
847
+ if q_len <= window_size:
848
+ return self._compute_attention(query, key, value, attention_mask)
849
+
850
+ attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)
851
+
852
+ # Apply causal mask
853
+ if attention_mask is not None:
854
+ causal_mask = attention_mask[:, :, :, : key.shape[-2]]
855
+ attn_weights = attn_weights + causal_mask
856
+
857
+ # Apply sliding window mask
858
+ row_idx = torch.arange(q_len, device=query.device).unsqueeze(1)
859
+ col_idx = torch.arange(k_len, device=query.device).unsqueeze(0)
860
+ # Can only attend to positions in [i - window_size + 1, i]
861
+ window_mask = (col_idx > row_idx) | (col_idx < row_idx - window_size + 1)
862
+ window_mask = window_mask.unsqueeze(0).unsqueeze(0)
863
+ attn_weights = attn_weights.masked_fill(window_mask, float('-inf'))
864
+
865
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
866
+ attn_output = torch.matmul(attn_weights, value)
867
+ return attn_output
868
+
869
+
870
+ class IQuestLoopCoderPreTrainedModel(PreTrainedModel):
871
+ """Base class for IQuestLoopCoder models."""
872
+ config_class = IQuestLoopCoderConfig
873
+ base_model_prefix = "model"
874
+ supports_gradient_checkpointing = True
875
+ _no_split_modules = ["IQuestLoopCoderDecoderLayer"]
876
+ _skip_keys_device_placement = ["past_key_values"]
877
+ _supports_cache_class = True
878
+ _supports_static_cache = True
879
+
880
+ def _init_weights(self, module):
881
+ std = self.config.initializer_range
882
+ if isinstance(module, nn.Linear):
883
+ module.weight.data.normal_(mean=0.0, std=std)
884
+ if module.bias is not None:
885
+ module.bias.data.zero_()
886
+ elif isinstance(module, nn.Embedding):
887
+ module.weight.data.normal_(mean=0.0, std=std)
888
+ if module.padding_idx is not None:
889
+ module.weight.data[module.padding_idx].zero_()
890
+
891
+
892
+ class IQuestLoopCoderModel(IQuestLoopCoderPreTrainedModel):
893
+ """IQuestLoopCoder Transformer decoder model."""
894
+
895
+ def __init__(self, config: IQuestLoopCoderConfig):
896
+ super().__init__(config)
897
+ self.padding_idx = config.pad_token_id
898
+ self.vocab_size = config.vocab_size
899
+
900
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
901
+ self.layers = nn.ModuleList([
902
+ IQuestLoopCoderDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)
903
+ ])
904
+ self.norm = IQuestLoopCoderRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
905
+
906
+ # Gate projections for Loop 2+ (one per layer)
907
+ self.gate_projections = nn.ModuleList([
908
+ LoopGateProjection(config.num_attention_heads, config.head_dim)
909
+ for _ in range(config.num_hidden_layers)
910
+ ])
911
+
912
+ # Loop configuration
913
+ self.loop_num = config.loop_num
914
+ self.loop_window_size = config.loop_window_size
915
+
916
+ self.gradient_checkpointing = False
917
+ self.post_init()
918
+
919
+ def get_input_embeddings(self):
920
+ return self.embed_tokens
921
+
922
+ def set_input_embeddings(self, value):
923
+ self.embed_tokens = value
924
+
925
+ def forward(
926
+ self,
927
+ input_ids: torch.LongTensor = None,
928
+ attention_mask: Optional[torch.Tensor] = None,
929
+ position_ids: Optional[torch.LongTensor] = None,
930
+ past_key_values: Optional[Cache] = None,
931
+ inputs_embeds: Optional[torch.FloatTensor] = None,
932
+ use_cache: Optional[bool] = None,
933
+ output_attentions: Optional[bool] = None,
934
+ output_hidden_states: Optional[bool] = None,
935
+ return_dict: Optional[bool] = None,
936
+ cache_position: Optional[torch.LongTensor] = None,
937
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
938
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
939
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
940
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
941
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
942
+
943
+ if inputs_embeds is None:
944
+ inputs_embeds = self.embed_tokens(input_ids)
945
+
946
+ seq_length = inputs_embeds.shape[1]
947
+
948
+ # Determine which forward path to use:
949
+ # 1. If past_key_values exists and seq_length == 1: autoregressive generation step
950
+ # -> Use standard attention with KV cache (no loop needed for single token)
951
+ # 2. Otherwise (prefill or training): use loop mechanism
952
+
953
+ is_generation_step = past_key_values is not None and seq_length == 1
954
+
955
+ if is_generation_step:
956
+ # Autoregressive generation: single token, use KV cache
957
+ return self._forward_with_cache(
958
+ inputs_embeds=inputs_embeds,
959
+ attention_mask=attention_mask,
960
+ position_ids=position_ids,
961
+ past_key_values=past_key_values,
962
+ use_cache=use_cache,
963
+ output_attentions=output_attentions,
964
+ output_hidden_states=output_hidden_states,
965
+ return_dict=return_dict,
966
+ cache_position=cache_position,
967
+ )
968
+
969
+ # Prefill or training: use loop mechanism
970
+ return self._forward_loop(
971
+ inputs_embeds=inputs_embeds,
972
+ attention_mask=attention_mask,
973
+ position_ids=position_ids,
974
+ output_attentions=output_attentions,
975
+ output_hidden_states=output_hidden_states,
976
+ return_dict=return_dict,
977
+ use_cache=use_cache,
978
+ cache_position=cache_position,
979
+ )
980
+
981
+ def _forward_loop(
982
+ self,
983
+ inputs_embeds: torch.Tensor,
984
+ attention_mask: Optional[torch.Tensor],
985
+ position_ids: Optional[torch.LongTensor],
986
+ output_attentions: bool,
987
+ output_hidden_states: bool,
988
+ return_dict: bool,
989
+ use_cache: bool = False,
990
+ cache_position: Optional[torch.LongTensor] = None,
991
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
992
+ """Forward with loop mechanism (for training and prefill).
993
+
994
+ This implements the Loop mechanism:
995
+ - Loop 1: Standard attention, stores K1, V1 for each layer
996
+ - Loop 2+: Mixed attention with gated combination of global (K1,V1) and local (K2,V2)
997
+ """
998
+ batch_size, seq_length, _ = inputs_embeds.shape
999
+
1000
+ if position_ids is None:
1001
+ device = inputs_embeds.device
1002
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
1003
+
1004
+ if cache_position is None:
1005
+ cache_position = torch.arange(seq_length, device=inputs_embeds.device)
1006
+
1007
+ # Create causal mask
1008
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, None, output_attentions)
1009
+
1010
+ hidden_states = inputs_embeds
1011
+ all_hidden_states = () if output_hidden_states else None
1012
+ all_self_attns = () if output_attentions else None
1013
+
1014
+ # For KV cache during prefill - use IQuestLoopCoderCache
1015
+ # In prefill, past_key_values should be None, so we create a new cache
1016
+ if use_cache:
1017
+ next_decoder_cache = IQuestLoopCoderCache(self.loop_window_size, len(self.layers))
1018
+ else:
1019
+ next_decoder_cache = None
1020
+
1021
+ # ============ Loop 1: Standard forward, store K1, V1 in shared cache ============
1022
+ for layer_idx, decoder_layer in enumerate(self.layers):
1023
+ if output_hidden_states:
1024
+ all_hidden_states += (hidden_states,)
1025
+
1026
+ # Get K1, V1 before standard forward (from original hidden_states, after layernorm)
1027
+ hidden_states_normed = decoder_layer.input_layernorm(hidden_states)
1028
+ q1, k1, v1 = decoder_layer.self_attn.get_qkv(hidden_states_normed, position_ids)
1029
+
1030
+ # Store K1, V1 in shared cache
1031
+ if use_cache:
1032
+ next_decoder_cache.update_shared(k1, v1, layer_idx)
1033
+
1034
+ # Standard forward
1035
+ layer_outputs = decoder_layer(
1036
+ hidden_states,
1037
+ attention_mask=causal_mask,
1038
+ position_ids=position_ids,
1039
+ past_key_value=None,
1040
+ output_attentions=output_attentions,
1041
+ use_cache=False,
1042
+ )
1043
+ hidden_states = layer_outputs[0]
1044
+
1045
+ if output_attentions:
1046
+ all_self_attns += (layer_outputs[1],)
1047
+
1048
+ # ============ Loop 2 to loop_num: Mixed attention, store in local cache ============
1049
+ for loop_idx in range(2, self.loop_num + 1):
1050
+ for layer_idx, decoder_layer in enumerate(self.layers):
1051
+ # Get K1, V1 from shared cache
1052
+ k1, v1 = next_decoder_cache.get_shared(layer_idx) if use_cache else (None, None)
1053
+ if k1 is None or v1 is None:
1054
+ # Fallback: compute K1, V1 if not in cache (shouldn't happen in prefill)
1055
+ hidden_states_normed = decoder_layer.input_layernorm(hidden_states)
1056
+ _, k1, v1 = decoder_layer.self_attn.get_qkv(hidden_states_normed, position_ids)
1057
+
1058
+ gate_proj = self.gate_projections[layer_idx]
1059
+
1060
+ hidden_states, gate_mean = decoder_layer.forward_loop2_mixed(
1061
+ hidden_states,
1062
+ k1=k1,
1063
+ v1=v1,
1064
+ gate_proj=gate_proj,
1065
+ attention_mask=causal_mask,
1066
+ position_ids=position_ids,
1067
+ loop_window_size=self.loop_window_size,
1068
+ )
1069
+
1070
+ # Store Loop 2+ KV in local cache (only for loop_idx == 2)
1071
+ if use_cache and loop_idx == 2:
1072
+ hidden_states_normed = decoder_layer.input_layernorm(hidden_states)
1073
+ _, k2, v2 = decoder_layer.self_attn.get_qkv(hidden_states_normed, position_ids)
1074
+ next_decoder_cache.update_local(k2, v2, layer_idx)
1075
+
1076
+ hidden_states = self.norm(hidden_states)
1077
+
1078
+ if output_hidden_states:
1079
+ all_hidden_states += (hidden_states,)
1080
+
1081
+ if not return_dict:
1082
+ return tuple(v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attns] if v is not None)
1083
+
1084
+ return BaseModelOutputWithPast(
1085
+ last_hidden_state=hidden_states,
1086
+ past_key_values=next_decoder_cache,
1087
+ hidden_states=all_hidden_states,
1088
+ attentions=all_self_attns,
1089
+ )
1090
+
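For readers skimming the loop mechanism: `forward_loop2_mixed` and the per-layer `LoopGateProjection` modules are defined earlier in this file and are not shown in this excerpt. The sketch below only illustrates the general pattern of blending a global-context attention output with a sliding-window one through a learned gate; the actual implementation may differ in detail.

```python
import torch

def gated_mix(attn_global: torch.Tensor, attn_local: torch.Tensor, gate: torch.Tensor) -> torch.Tensor:
    # Blend two attention outputs with a gate in [0, 1]:
    #   attn_global / attn_local: (batch, num_heads, seq_len, head_dim)
    #   gate:                     (batch, num_heads, seq_len, 1), e.g. a sigmoid of some projection
    return gate * attn_global + (1.0 - gate) * attn_local

batch, heads, seq, dim = 1, 2, 4, 8
attn_global = torch.randn(batch, heads, seq, dim)   # attention over the full Loop 1 context
attn_local = torch.randn(batch, heads, seq, dim)    # attention over the local sliding window
gate = torch.sigmoid(torch.randn(batch, heads, seq, 1))
print(gated_mix(attn_global, attn_local, gate).shape)  # torch.Size([1, 2, 4, 8])
```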
1091
+ def _forward_with_cache(
1092
+ self,
1093
+ inputs_embeds: torch.Tensor,
1094
+ attention_mask: Optional[torch.Tensor],
1095
+ position_ids: Optional[torch.LongTensor],
1096
+ past_key_values: Optional[Cache],
1097
+ use_cache: bool,
1098
+ output_attentions: bool,
1099
+ output_hidden_states: bool,
1100
+ return_dict: bool,
1101
+ cache_position: Optional[torch.LongTensor],
1102
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1103
+ """Forward with KV cache using loop mechanism (for inference generation).
1104
+
1105
+ Loop 1: Standard attention, uses shared KV cache (previous tokens + current token)
1106
+ Loop 2+: Mixed attention, uses local KV cache (sliding window)
1107
+ """
1108
+ batch_size, seq_length, _ = inputs_embeds.shape
1109
+
1110
+ if cache_position is None:
1111
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1112
+ cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device)
1113
+
1114
+ if position_ids is None:
1115
+ position_ids = cache_position.unsqueeze(0)
1116
+
1117
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
1118
+
1119
+ # Ensure we're using IQuestLoopCoderCache
1120
+ if use_cache:
1121
+ if not isinstance(past_key_values, IQuestLoopCoderCache):
1122
+ # Convert to IQuestLoopCoderCache if needed
1123
+ next_decoder_cache = IQuestLoopCoderCache(self.loop_window_size, len(self.layers))
1124
+ # Copy existing cache if possible
1125
+ if past_key_values is not None:
1126
+ for layer_idx in range(len(self.layers)):
1127
+ try:
1128
+ past_k = past_key_values.key_cache[layer_idx] if hasattr(past_key_values, 'key_cache') else None
1129
+ past_v = past_key_values.value_cache[layer_idx] if hasattr(past_key_values, 'value_cache') else None
1130
+ if past_k is not None and past_v is not None:
1131
+ next_decoder_cache.update_shared(past_k, past_v, layer_idx)
1132
+ except Exception:
1133
+ pass
1134
+ else:
1135
+ next_decoder_cache = past_key_values
1136
+ else:
1137
+ next_decoder_cache = None
1138
+
1139
+ hidden_states = inputs_embeds
1140
+ all_hidden_states = () if output_hidden_states else None
1141
+ all_self_attns = () if output_attentions else None
1142
+
1143
+ # ============ Loop 1: Standard attention, store in shared cache ============
1144
+ for layer_idx, decoder_layer in enumerate(self.layers):
1145
+ if output_hidden_states:
1146
+ all_hidden_states += (hidden_states,)
1147
+
1148
+ # Get past shared KV cache
1149
+ past_shared_key, past_shared_value = None, None
1150
+ if next_decoder_cache is not None:
1151
+ past_shared_key, past_shared_value = next_decoder_cache.get_shared(layer_idx)
1152
+
1153
+ # Forward Loop 1
1154
+ attn_output, k1, v1 = decoder_layer.self_attn.forward_decode_loop1(
1155
+ hidden_states=decoder_layer.input_layernorm(hidden_states),
1156
+ past_shared_key=past_shared_key,
1157
+ past_shared_value=past_shared_value,
1158
+ attention_mask=causal_mask,
1159
+ position_ids=position_ids,
1160
+ cache_position=cache_position,
1161
+ )
1162
+
1163
+ # Update shared cache with current token's Loop 1 KV
1164
+ if use_cache:
1165
+ next_decoder_cache.update_shared(k1, v1, layer_idx)
1166
+
1167
+ hidden_states = hidden_states + attn_output
1168
+
1169
+ # MLP
1170
+ residual = hidden_states
1171
+ hidden_states = decoder_layer.post_attention_layernorm(hidden_states)
1172
+ hidden_states = decoder_layer.mlp(hidden_states)
1173
+ hidden_states = residual + hidden_states
1174
+
1175
+ if output_attentions:
1176
+ all_self_attns += (None,) # We don't return attention weights in decode loop
1177
+
1178
+ # ============ Loop 2 to loop_num: Mixed attention, store in local cache ============
1179
+ # Store k1, v1 from Loop 1 for use in Loop 2+
1180
+ loop1_kv = []
1181
+ for layer_idx in range(len(self.layers)):
1182
+ if next_decoder_cache is not None:
1183
+ k1_full, v1_full = next_decoder_cache.get_shared(layer_idx)
1184
+ if k1_full is not None and v1_full is not None:
1185
+ # Get only the last token (current token)
1186
+ loop1_kv.append((k1_full[:, :, -1:, :], v1_full[:, :, -1:, :], k1_full, v1_full))
1187
+ else:
1188
+ loop1_kv.append((None, None, None, None))
1189
+ else:
1190
+ loop1_kv.append((None, None, None, None))
1191
+
1192
+ for loop_idx in range(2, self.loop_num + 1):
1193
+ for layer_idx, decoder_layer in enumerate(self.layers):
1194
+ # Get k1, v1 (current token's Loop 1 KV) and full shared cache
1195
+ k1_current, v1_current, k1_full, v1_full = loop1_kv[layer_idx]
1196
+ if k1_current is None or v1_current is None:
1197
+ continue
1198
+
1199
+ # Get past local KV cache
1200
+ past_local_key, past_local_value = None, None
1201
+ if next_decoder_cache is not None:
1202
+ past_local_key, past_local_value = next_decoder_cache.get_local(layer_idx)
1203
+
1204
+ gate_proj = self.gate_projections[layer_idx]
1205
+
1206
+ # Forward Loop 2+
1207
+ attn_output, k2, v2 = decoder_layer.self_attn.forward_decode_loop2(
1208
+ hidden_states=decoder_layer.input_layernorm(hidden_states),
1209
+ k1=k1_current,
1210
+ v1=v1_current,
1211
+ past_shared_key=k1_full[:, :, :-1, :] if k1_full is not None and k1_full.shape[2] > 1 else None,
1212
+ past_shared_value=v1_full[:, :, :-1, :] if v1_full is not None and v1_full.shape[2] > 1 else None,
1213
+ past_local_key=past_local_key,
1214
+ past_local_value=past_local_value,
1215
+ gate_proj=gate_proj,
1216
+ attention_mask=causal_mask,
1217
+ position_ids=position_ids,
1218
+ loop_window_size=self.loop_window_size,
1219
+ )
1220
+
1221
+ # Update local cache with current token's Loop 2+ KV
1222
+ if use_cache and loop_idx == 2:
1223
+ next_decoder_cache.update_local(k2, v2, layer_idx)
1224
+
1225
+ hidden_states = hidden_states + attn_output
1226
+
1227
+ # MLP
1228
+ residual = hidden_states
1229
+ hidden_states = decoder_layer.post_attention_layernorm(hidden_states)
1230
+ hidden_states = decoder_layer.mlp(hidden_states)
1231
+ hidden_states = residual + hidden_states
1232
+
1233
+ hidden_states = self.norm(hidden_states)
1234
+
1235
+ if output_hidden_states:
1236
+ all_hidden_states += (hidden_states,)
1237
+
1238
+ next_cache = next_decoder_cache if use_cache else None
1239
+
1240
+ if not return_dict:
1241
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1242
+
1243
+ return BaseModelOutputWithPast(
1244
+ last_hidden_state=hidden_states,
1245
+ past_key_values=next_cache,
1246
+ hidden_states=all_hidden_states,
1247
+ attentions=all_self_attns,
1248
+ )
1249
+
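`_forward_with_cache` leans on `IQuestLoopCoderCache`, which is defined earlier in this file and not shown in this excerpt. The toy class below only illustrates the shared-versus-local split it relies on: the shared (Loop 1) cache grows with every generated token, while the local (Loop 2+) cache is kept truncated to the last `loop_window_size` positions.

```python
import torch

class TwoTierKVCacheSketch:
    """Toy stand-in for the shared/local split used above (not the real IQuestLoopCoderCache)."""

    def __init__(self, window_size: int):
        self.window_size = window_size
        self.shared_k = self.shared_v = None  # full-context Loop 1 keys/values
        self.local_k = self.local_v = None    # sliding-window Loop 2+ keys/values

    def update_shared(self, k, v):
        self.shared_k = k if self.shared_k is None else torch.cat([self.shared_k, k], dim=2)
        self.shared_v = v if self.shared_v is None else torch.cat([self.shared_v, v], dim=2)

    def update_local(self, k, v):
        self.local_k = k if self.local_k is None else torch.cat([self.local_k, k], dim=2)
        self.local_v = v if self.local_v is None else torch.cat([self.local_v, v], dim=2)
        # Keep only the most recent window_size positions.
        self.local_k = self.local_k[:, :, -self.window_size:, :]
        self.local_v = self.local_v[:, :, -self.window_size:, :]

cache = TwoTierKVCacheSketch(window_size=4)
for _ in range(6):  # six single-token decode steps
    k = v = torch.randn(1, 2, 1, 8)
    cache.update_shared(k, v)
    cache.update_local(k, v)
print(cache.shared_k.shape, cache.local_k.shape)  # torch.Size([1, 2, 6, 8]) torch.Size([1, 2, 4, 8])
```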
1250
+ def _update_causal_mask(
1251
+ self,
1252
+ attention_mask: torch.Tensor,
1253
+ input_tensor: torch.Tensor,
1254
+ cache_position: torch.Tensor,
1255
+ past_key_values: Cache,
1256
+ output_attentions: bool,
1257
+ ):
1258
+ """Create causal attention mask."""
1259
+ dtype, device = input_tensor.dtype, input_tensor.device
1260
+ min_dtype = torch.finfo(dtype).min
1261
+ sequence_length = input_tensor.shape[1]
1262
+
1263
+ # Determine target length for attention
1264
+ if past_key_values is not None:
1265
+ # For DynamicCache: use get_seq_length() to get cached length
1266
+ # target_length = cached_length + current_sequence_length
1267
+ past_length = past_key_values.get_seq_length()
1268
+ target_length = past_length + sequence_length
1269
+ elif attention_mask is not None:
1270
+ target_length = attention_mask.shape[-1]
1271
+ else:
1272
+ target_length = sequence_length
1273
+
1274
+ # Create causal mask
1275
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1276
+ if sequence_length != 1:
1277
+ # For prefill: standard causal mask
1278
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1279
+
1280
+ # Adjust for cache position (for generation steps after prefill)
1281
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1282
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1283
+
1284
+ if attention_mask is not None:
1285
+ causal_mask = causal_mask.clone()
1286
+ mask_length = attention_mask.shape[-1]
1287
+ if mask_length <= target_length:
1288
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
1289
+ padding_mask = padding_mask == 0
1290
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
1291
+
1292
+ return causal_mask
1293
+
1294
+
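`_update_causal_mask` follows the usual additive-mask convention: 0 where attention is allowed and the most negative representable value where it is not, so adding the mask to the scores before softmax suppresses the masked positions. A minimal sketch of the prefill case (padding handling and cache offsets omitted):

```python
import torch

def additive_causal_mask(seq_len: int, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # 0 on and below the diagonal (allowed), dtype-min above it (masked).
    min_dtype = torch.finfo(dtype).min
    mask = torch.full((seq_len, seq_len), fill_value=min_dtype, dtype=dtype)
    return torch.triu(mask, diagonal=1)

print(additive_causal_mask(4))
```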
1295
+ class IQuestLoopCoderForCausalLM(IQuestLoopCoderPreTrainedModel, GenerationMixin):
1296
+ """IQuestLoopCoder model with a causal language modeling head."""
1297
+ _tied_weights_keys = ["lm_head.weight"]
1298
+
1299
+ def __init__(self, config):
1300
+ super().__init__(config)
1301
+ self.model = IQuestLoopCoderModel(config)
1302
+ self.vocab_size = config.vocab_size
1303
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1304
+ self.post_init()
1305
+
1306
+ def get_input_embeddings(self):
1307
+ return self.model.embed_tokens
1308
+
1309
+ def set_input_embeddings(self, value):
1310
+ self.model.embed_tokens = value
1311
+
1312
+ def get_output_embeddings(self):
1313
+ return self.lm_head
1314
+
1315
+ def set_output_embeddings(self, new_embeddings):
1316
+ self.lm_head = new_embeddings
1317
+
1318
+ def set_decoder(self, decoder):
1319
+ self.model = decoder
1320
+
1321
+ def get_decoder(self):
1322
+ return self.model
1323
+
1324
+ def forward(
1325
+ self,
1326
+ input_ids: torch.LongTensor = None,
1327
+ attention_mask: Optional[torch.Tensor] = None,
1328
+ position_ids: Optional[torch.LongTensor] = None,
1329
+ past_key_values: Optional[Cache] = None,
1330
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1331
+ labels: Optional[torch.LongTensor] = None,
1332
+ use_cache: Optional[bool] = None,
1333
+ output_attentions: Optional[bool] = None,
1334
+ output_hidden_states: Optional[bool] = None,
1335
+ return_dict: Optional[bool] = None,
1336
+ cache_position: Optional[torch.LongTensor] = None,
1337
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1338
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1339
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1340
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1341
+
1342
+ outputs = self.model(
1343
+ input_ids=input_ids,
1344
+ attention_mask=attention_mask,
1345
+ position_ids=position_ids,
1346
+ past_key_values=past_key_values,
1347
+ inputs_embeds=inputs_embeds,
1348
+ use_cache=use_cache,
1349
+ output_attentions=output_attentions,
1350
+ output_hidden_states=output_hidden_states,
1351
+ return_dict=return_dict,
1352
+ cache_position=cache_position,
1353
+ )
1354
+
1355
+ hidden_states = outputs[0]
1356
+ logits = self.lm_head(hidden_states)
1357
+ logits = logits.float()
1358
+
1359
+ loss = None
1360
+ if labels is not None:
1361
+ shift_logits = logits[..., :-1, :].contiguous()
1362
+ shift_labels = labels[..., 1:].contiguous()
1363
+ loss_fct = nn.CrossEntropyLoss()
1364
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1365
+ shift_labels = shift_labels.view(-1)
1366
+ shift_labels = shift_labels.to(shift_logits.device)
1367
+ loss = loss_fct(shift_logits, shift_labels)
1368
+
1369
+ if not return_dict:
1370
+ output = (logits,) + outputs[1:]
1371
+ return (loss,) + output if loss is not None else output
1372
+
1373
+ return CausalLMOutputWithPast(
1374
+ loss=loss,
1375
+ logits=logits,
1376
+ past_key_values=outputs.past_key_values,
1377
+ hidden_states=outputs.hidden_states,
1378
+ attentions=outputs.attentions,
1379
+ )
1380
+
1381
+ def prepare_inputs_for_generation(
1382
+ self,
1383
+ input_ids,
1384
+ past_key_values=None,
1385
+ attention_mask=None,
1386
+ inputs_embeds=None,
1387
+ cache_position=None,
1388
+ use_cache=True,
1389
+ **kwargs,
1390
+ ):
1391
+ past_length = 0
1392
+ if past_key_values is not None:
1393
+ past_length = past_key_values.get_seq_length()
1394
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1395
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1396
+ elif past_length < input_ids.shape[1]:
1397
+ input_ids = input_ids[:, past_length:]
1398
+
1399
+ if cache_position is None:
1400
+ cache_position = torch.arange(past_length, past_length + input_ids.shape[1], device=input_ids.device)
1401
+ elif use_cache:
1402
+ cache_position = cache_position[-input_ids.shape[1]:]
1403
+
1404
+ position_ids = cache_position.unsqueeze(0)
1405
+
1406
+ if inputs_embeds is not None and past_key_values is None:
1407
+ model_inputs = {"inputs_embeds": inputs_embeds}
1408
+ else:
1409
+ model_inputs = {"input_ids": input_ids.contiguous()}
1410
+
1411
+ model_inputs.update(
1412
+ {
1413
+ "position_ids": position_ids,
1414
+ "cache_position": cache_position,
1415
+ "past_key_values": past_key_values,
1416
+ "use_cache": use_cache,
1417
+ "attention_mask": attention_mask,
1418
+ }
1419
+ )
1420
+ return model_inputs
1421
+
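For context, a hedged end-to-end sketch of how the classes above are exercised during generation. The checkpoint path and the need for `trust_remote_code` are assumptions, not something this file states: `generate()` calls `prepare_inputs_for_generation()` at each step, the prefill goes through the loop forward path, and later single-token steps reuse the cache.

```python
# Hypothetical usage sketch; "path/to/IQuest-Coder-checkpoint" is a placeholder for a local
# download of the full repository (the model weights are not part of this excerpt).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "path/to/IQuest-Coder-checkpoint"
tokenizer = AutoTokenizer.from_pretrained(ckpt, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(ckpt, trust_remote_code=True, torch_dtype=torch.bfloat16)

messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# Prefill runs the loop forward path; each subsequent step is a single-token _forward_with_cache call.
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```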
special_tokens_map.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|CLS|>",
4
+ "<|SEP|>",
5
+ "<|EOD|>",
6
+ "<|MASK|>",
7
+ "<|PAD|>",
8
+ "<|fim_prefix|>",
9
+ "<|fim_middle|>",
10
+ "<|fim_suffix|>",
11
+ "<|im_start|>",
12
+ "<|im_end|>",
13
+ "<|fim_pad|>",
14
+ "<|endoftext|>",
15
+ "<|repo_name|>",
16
+ "<|file_sep|>",
17
+ "<think>",
18
+ "</think>"
19
+ ],
20
+ "bos_token": {
21
+ "content": "<s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "eos_token": {
28
+ "content": "<|im_end|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false
33
+ },
34
+ "pad_token": {
35
+ "content": "<|endoftext|>",
36
+ "lstrip": false,
37
+ "normalized": false,
38
+ "rstrip": false,
39
+ "single_word": false
40
+ },
41
+ "unk_token": {
42
+ "content": "<unk>",
43
+ "lstrip": false,
44
+ "normalized": true,
45
+ "rstrip": false,
46
+ "single_word": true
47
+ }
48
+ }
tokenization_iquestcoder.py ADDED
@@ -0,0 +1,552 @@
1
+ """Tokenization classes for IQuestCoder."""
2
+
3
+ import os
4
+ from shutil import copyfile
5
+ from typing import Any, Dict, List, Optional, Tuple, Union
6
+
7
+ import sentencepiece as spm
8
+
9
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
10
+ from transformers.utils import logging
11
+
12
+
13
+ logger = logging.get_logger(__name__)
14
+
15
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
16
+
17
+ PRETRAINED_VOCAB_FILES_MAP = {
18
+ "vocab_file": {},
19
+ "tokenizer_file": {},
20
+ }
21
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
22
+
23
+
24
+
25
+ class IQuestCoderTokenizer(PreTrainedTokenizer):
26
+
27
+ vocab_files_names = VOCAB_FILES_NAMES
28
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
29
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
30
+ model_input_names = ["input_ids", "attention_mask"]
31
+
32
+ def __init__(
33
+ self,
34
+ vocab_file,
35
+ unk_token="<unk>",
36
+ bos_token="<s>",
37
+ eos_token="</s>",
38
+ pad_token=None,
39
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
40
+ add_bos_token=True,
41
+ add_eos_token=False,
42
+ clean_up_tokenization_spaces=False,
43
+ add_prefix_space=False,
44
+ legacy=None,
45
+ use_default_system_prompt=False,
46
+ chat_template=None,
47
+ **kwargs,
48
+ ):
49
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
50
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
51
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
52
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
53
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
54
+
55
+ # Legacy behavior handling
56
+ if legacy is None:
57
+ logger.warning_once(
58
+ f"You are using the default legacy behaviour of the {self.__class__.__name__}. This is"
59
+ " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
60
+ " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
61
+ " means, and thoroughly read the reason why this was added as explained in"
62
+ " https://github.com/huggingface/transformers/pull/24565"
63
+ )
64
+ legacy = True
65
+
66
+ self.legacy = legacy
67
+ self.vocab_file = vocab_file
68
+ self.add_bos_token = add_bos_token
69
+ self.add_eos_token = add_eos_token
70
+ self.add_prefix_space = add_prefix_space
71
+ self.use_default_system_prompt = use_default_system_prompt
72
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
73
+ self.sp_model.Load(vocab_file)
74
+
75
+
76
+
77
+ super().__init__(
78
+ bos_token=bos_token,
79
+ eos_token=eos_token,
80
+ unk_token=unk_token,
81
+ pad_token=pad_token,
82
+ add_bos_token=add_bos_token,
83
+ add_eos_token=add_eos_token,
84
+ sp_model_kwargs=self.sp_model_kwargs,
85
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
86
+ add_prefix_space=add_prefix_space,
87
+ legacy=legacy,
88
+ use_default_system_prompt=use_default_system_prompt,
89
+ chat_template=chat_template,
90
+ **kwargs,
91
+ )
92
+
93
+ def __getstate__(self):
94
+ state = self.__dict__.copy()
95
+ state["sp_model"] = None
96
+ return state
97
+
98
+ def __setstate__(self, d):
99
+ self.__dict__ = d
100
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
101
+ self.sp_model.Load(self.vocab_file)
102
+
103
+ @property
104
+ def vocab_size(self) -> int:
105
+ """Returns the vocabulary size."""
106
+ return self.sp_model.get_piece_size()
107
+
108
+ def get_vocab(self) -> Dict[str, int]:
109
+ """Returns the vocabulary as a dictionary of token to index."""
110
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
111
+ vocab.update(self.added_tokens_encoder)
112
+ return vocab
113
+
114
+ def _tokenize(self, text: str) -> List[str]:
115
+ """
116
+ Tokenize a string.
117
+
118
+ Args:
119
+ text (`str`): The text to tokenize.
120
+
121
+ Returns:
122
+ `List[str]`: The list of tokens.
123
+ """
124
+ if self.add_prefix_space:
125
+ text = " " + text
126
+
127
+ if self.legacy:
128
+ return self.sp_model.encode(text, out_type=str)
129
+
130
+ # Non-legacy behavior: handle special tokens properly
131
+ return self.sp_model.encode(text, out_type=str)
132
+
133
+ def _convert_token_to_id(self, token: str) -> int:
134
+ """Converts a token (str) to an id using the vocab."""
135
+ return self.sp_model.piece_to_id(token)
136
+
137
+ def _convert_id_to_token(self, index: int) -> str:
138
+ """Converts an index (integer) to a token (str) using the vocab."""
139
+ token = self.sp_model.IdToPiece(index)
140
+ return token
141
+
142
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
143
+ """
144
+ Converts a sequence of tokens (strings) to a single string.
145
+
146
+ This method handles special tokens separately to ensure they are not
147
+ decoded using the SentencePiece model.
148
+
149
+ Args:
150
+ tokens (`List[str]`): The list of tokens to convert.
151
+
152
+ Returns:
153
+ `str`: The decoded string.
154
+ """
155
+ current_sub_tokens = []
156
+ out_string = ""
157
+ prev_is_special = False
158
+ for i, token in enumerate(tokens):
159
+ # make sure that special tokens are not decoded using sentencepiece model
160
+ if token in self.all_special_tokens:
161
+ if not prev_is_special and i != 0:
162
+ out_string += " "
163
+ out_string += self.sp_model.decode(current_sub_tokens) + token
164
+ prev_is_special = True
165
+ current_sub_tokens = []
166
+ else:
167
+ current_sub_tokens.append(token)
168
+ prev_is_special = False
169
+ out_string += self.sp_model.decode(current_sub_tokens)
170
+ return out_string
171
+
172
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
173
+ """
174
+ Save the vocabulary and special tokens file to a directory.
175
+
176
+ Args:
177
+ save_directory (`str`):
178
+ The directory in which to save the vocabulary.
179
+ filename_prefix (`str`, *optional*):
180
+ An optional prefix to add to the names of the saved files.
181
+
182
+ Returns:
183
+ `Tuple(str)`: Paths to the files saved.
184
+ """
185
+ if not os.path.isdir(save_directory):
186
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
187
+ return
188
+ out_vocab_file = os.path.join(
189
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
190
+ )
191
+
192
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
193
+ copyfile(self.vocab_file, out_vocab_file)
194
+ elif not os.path.isfile(self.vocab_file):
195
+ with open(out_vocab_file, "wb") as fi:
196
+ content_spiece_model = self.sp_model.serialized_model_proto()
197
+ fi.write(content_spiece_model)
198
+
199
+ return (out_vocab_file,)
200
+
201
+ def build_inputs_with_special_tokens(
202
+ self,
203
+ token_ids_0: List[int],
204
+ token_ids_1: Optional[List[int]] = None
205
+ ) -> List[int]:
206
+ """
207
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
208
+ and adding special tokens.
209
+
210
+ An IQuestCoder sequence has the following format:
211
+
212
+ - single sequence: `<s> X </s>` (if add_eos_token is True) or `<s> X` (default)
213
+ - pair of sequences: `<s> A </s> <s> B </s>` (if add_eos_token is True) or `<s> A <s> B` (default)
214
+
215
+ Args:
216
+ token_ids_0 (`List[int]`):
217
+ List of IDs to which the special tokens will be added.
218
+ token_ids_1 (`List[int]`, *optional*):
219
+ Optional second list of IDs for sequence pairs.
220
+
221
+ Returns:
222
+ `List[int]`: List of input IDs with the appropriate special tokens.
223
+ """
224
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
225
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
226
+
227
+ output = bos_token_id + token_ids_0 + eos_token_id
228
+
229
+ if token_ids_1 is not None:
230
+ output = output + bos_token_id + token_ids_1 + eos_token_id
231
+
232
+ return output
233
+
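With the defaults documented above (`add_bos_token=True`, `add_eos_token=False`), this reduces to prepending the BOS id. A tiny self-contained sketch with illustrative ids (1 for BOS, 2 for EOS):

```python
def build_inputs(token_ids_0, token_ids_1=None, bos_id=1, eos_id=2,
                 add_bos_token=True, add_eos_token=False):
    # Mirrors build_inputs_with_special_tokens above, with the ids made explicit.
    bos = [bos_id] if add_bos_token else []
    eos = [eos_id] if add_eos_token else []
    output = bos + token_ids_0 + eos
    if token_ids_1 is not None:
        output = output + bos + token_ids_1 + eos
    return output

print(build_inputs([10, 11, 12]))        # [1, 10, 11, 12]
print(build_inputs([10, 11], [20, 21]))  # [1, 10, 11, 1, 20, 21]
```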
234
+ def get_special_tokens_mask(
235
+ self,
236
+ token_ids_0: List[int],
237
+ token_ids_1: Optional[List[int]] = None,
238
+ already_has_special_tokens: bool = False
239
+ ) -> List[int]:
240
+ """
241
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
242
+ special tokens using the tokenizer `prepare_for_model` method.
243
+
244
+ Args:
245
+ token_ids_0 (`List[int]`):
246
+ List of IDs.
247
+ token_ids_1 (`List[int]`, *optional*):
248
+ Optional second list of IDs for sequence pairs.
249
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
250
+ Whether or not the token list is already formatted with special tokens for the model.
251
+
252
+ Returns:
253
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
254
+ """
255
+ if already_has_special_tokens:
256
+ return super().get_special_tokens_mask(
257
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
258
+ )
259
+
260
+ bos_token_id = [1] if self.add_bos_token else []
261
+ eos_token_id = [1] if self.add_eos_token else []
262
+
263
+ if token_ids_1 is None:
264
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
265
+ return (
266
+ bos_token_id
267
+ + ([0] * len(token_ids_0))
268
+ + eos_token_id
269
+ + bos_token_id
270
+ + ([0] * len(token_ids_1))
271
+ + eos_token_id
272
+ )
273
+
274
+ def create_token_type_ids_from_sequences(
275
+ self,
276
+ token_ids_0: List[int],
277
+ token_ids_1: Optional[List[int]] = None
278
+ ) -> List[int]:
279
+ """
280
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task.
281
+
282
+ An IQuestCoder sequence pair mask has the following format:
283
+
284
+ ```
285
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
286
+ | first sequence | second sequence |
287
+ ```
288
+
289
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
290
+
291
+ Args:
292
+ token_ids_0 (`List[int]`):
293
+ List of IDs.
294
+ token_ids_1 (`List[int]`, *optional*):
295
+ Optional second list of IDs for sequence pairs.
296
+
297
+ Returns:
298
+ `List[int]`: List of token type IDs according to the given sequence(s).
299
+ """
300
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
301
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
302
+
303
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
304
+
305
+ if token_ids_1 is not None:
306
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
307
+
308
+ return output
309
+
310
+ @property
311
+ def default_chat_template(self) -> str:
312
+ """
313
+ Returns the default chat template for IQuestCoder.
314
+
315
+ This template formats conversations with system, user, and assistant roles.
316
+ """
317
+ return DEFAULT_CHAT_TEMPLATE
318
+
319
+ def apply_chat_template(
320
+ self,
321
+ conversation: Union[List[Dict[str, str]], "Conversation"],
322
+ chat_template: Optional[str] = None,
323
+ add_generation_prompt: bool = False,
324
+ tokenize: bool = True,
325
+ padding: bool = False,
326
+ truncation: bool = False,
327
+ max_length: Optional[int] = None,
328
+ return_tensors: Optional[str] = None,
329
+ return_dict: bool = False,
330
+ **tokenizer_kwargs,
331
+ ):
332
+ """
333
+ Apply a chat template to format a conversation.
334
+
335
+ Args:
336
+ conversation (`List[Dict[str, str]]` or `Conversation`):
337
+ A list of dicts with "role" and "content" keys, representing the conversation history.
338
+ chat_template (`str`, *optional*):
339
+ A Jinja template to use for formatting. If not provided, the tokenizer's default will be used.
340
+ add_generation_prompt (`bool`, *optional*, defaults to `False`):
341
+ Whether to add a generation prompt at the end for the assistant to continue.
342
+ tokenize (`bool`, *optional*, defaults to `True`):
343
+ Whether to tokenize the output. If `False`, returns a string.
344
+ padding (`bool`, *optional*, defaults to `False`):
345
+ Whether to pad sequences.
346
+ truncation (`bool`, *optional*, defaults to `False`):
347
+ Whether to truncate sequences.
348
+ max_length (`int`, *optional*):
349
+ Maximum length of the output.
350
+ return_tensors (`str`, *optional*):
351
+ The type of tensors to return ("pt", "tf", "np", or None).
352
+ return_dict (`bool`, *optional*, defaults to `False`):
353
+ Whether to return a dictionary with additional information.
354
+ **tokenizer_kwargs:
355
+ Additional keyword arguments passed to the tokenizer.
356
+
357
+ Returns:
358
+ `Union[str, List[int], BatchEncoding]`: The formatted (and optionally tokenized) conversation.
359
+
360
+ Example:
361
+ ```python
362
+ >>> tokenizer = IQuestCoderTokenizer.from_pretrained("path/to/model")
363
+ >>> conversation = [
364
+ ... {"role": "system", "content": "You are a helpful assistant."},
365
+ ... {"role": "user", "content": "Hello!"},
366
+ ... {"role": "assistant", "content": "Hi there! How can I help you today?"},
367
+ ... {"role": "user", "content": "What's the weather like?"},
368
+ ... ]
369
+ >>> tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
370
+ '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n<|im_start|>user\\nHello!<|im_end|>\\n...'
371
+ ```
372
+ """
373
+ # Use parent class implementation with our template
374
+ return super().apply_chat_template(
375
+ conversation,
376
+ chat_template=chat_template,
377
+ add_generation_prompt=add_generation_prompt,
378
+ tokenize=tokenize,
379
+ padding=padding,
380
+ truncation=truncation,
381
+ max_length=max_length,
382
+ return_tensors=return_tensors,
383
+ return_dict=return_dict,
384
+ **tokenizer_kwargs,
385
+ )
386
+
387
+
388
+ # Try to import and create Fast tokenizer version
389
+ try:
390
+ from transformers import PreTrainedTokenizerFast
391
+ from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
392
+
393
+ class IQuestCoderTokenizerFast(PreTrainedTokenizerFast):
394
+ """
395
+ Construct a "fast" IQuestCoder tokenizer (backed by HuggingFace's *tokenizers* library).
396
+
397
+ This is a fast implementation of [`IQuestCoderTokenizer`] using the 🤗 Tokenizers library.
398
+
399
+ Args:
400
+ vocab_file (`str`, *optional*):
401
+ Path to the vocabulary file (SentencePiece model).
402
+ tokenizer_file (`str`, *optional*):
403
+ Path to a tokenizer JSON file.
404
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
405
+ The unknown token.
406
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
407
+ The beginning of sequence token.
408
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
409
+ The end of sequence token.
410
+ pad_token (`str`, *optional*):
411
+ The token used for padding.
412
+ add_bos_token (`bool`, *optional*, defaults to `True`):
413
+ Whether to add a BOS token at the start of sequences.
414
+ add_eos_token (`bool`, *optional*, defaults to `False`):
415
+ Whether to add an EOS token at the end of sequences.
416
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
417
+ Whether to add an initial space to the input.
418
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
419
+ Whether to use the default system prompt.
420
+ chat_template (`str`, *optional*):
421
+ A Jinja template for formatting conversations.
422
+
423
+ Example:
424
+ ```python
425
+ >>> from tokenization_iquestcoder import IQuestCoderTokenizerFast
426
+
427
+ >>> tokenizer = IQuestCoderTokenizerFast.from_pretrained("path/to/model")
428
+ >>> tokenizer.encode("Hello, world!")
429
+ [1, ...]  # BOS id followed by the SentencePiece piece ids (exact values depend on the vocabulary)
430
+ ```
431
+ """
432
+
433
+ vocab_files_names = VOCAB_FILES_NAMES
434
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
435
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
436
+ model_input_names = ["input_ids", "attention_mask"]
437
+ slow_tokenizer_class = IQuestCoderTokenizer
438
+
439
+ def __init__(
440
+ self,
441
+ vocab_file=None,
442
+ tokenizer_file=None,
443
+ unk_token="<unk>",
444
+ bos_token="<s>",
445
+ eos_token="</s>",
446
+ pad_token=None,
447
+ add_bos_token=True,
448
+ add_eos_token=False,
449
+ add_prefix_space=False,
450
+ use_default_system_prompt=False,
451
+ chat_template=None,
452
+ **kwargs,
453
+ ):
454
+ self.vocab_file = vocab_file  # kept so can_save_slow_tokenizer() can check for the SentencePiece model
+ self.add_bos_token = add_bos_token
455
+ self.add_eos_token = add_eos_token
456
+ self.add_prefix_space = add_prefix_space
457
+ self.use_default_system_prompt = use_default_system_prompt
458
+
459
+ if chat_template is None:
460
+ chat_template = DEFAULT_CHAT_TEMPLATE
461
+
462
+ super().__init__(
463
+ vocab_file=vocab_file,
464
+ tokenizer_file=tokenizer_file,
465
+ unk_token=unk_token,
466
+ bos_token=bos_token,
467
+ eos_token=eos_token,
468
+ pad_token=pad_token,
469
+ add_bos_token=add_bos_token,
470
+ add_eos_token=add_eos_token,
471
+ add_prefix_space=add_prefix_space,
472
+ use_default_system_prompt=use_default_system_prompt,
473
+ chat_template=chat_template,
474
+ **kwargs,
475
+ )
476
+
477
+ @property
478
+ def can_save_slow_tokenizer(self) -> bool:
479
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
480
+
481
+ @property
482
+ def default_chat_template(self) -> str:
483
+ """Returns the default chat template."""
484
+ return DEFAULT_CHAT_TEMPLATE
485
+
486
+ def build_inputs_with_special_tokens(
487
+ self,
488
+ token_ids_0: List[int],
489
+ token_ids_1: Optional[List[int]] = None
490
+ ) -> List[int]:
491
+ """Build model inputs with special tokens."""
492
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
493
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
494
+
495
+ output = bos_token_id + token_ids_0 + eos_token_id
496
+
497
+ if token_ids_1 is not None:
498
+ output = output + bos_token_id + token_ids_1 + eos_token_id
499
+
500
+ return output
501
+
502
+ def get_special_tokens_mask(
503
+ self,
504
+ token_ids_0: List[int],
505
+ token_ids_1: Optional[List[int]] = None,
506
+ already_has_special_tokens: bool = False
507
+ ) -> List[int]:
508
+ """Retrieve special tokens mask."""
509
+ if already_has_special_tokens:
510
+ return super().get_special_tokens_mask(
511
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
512
+ )
513
+
514
+ bos_token_id = [1] if self.add_bos_token else []
515
+ eos_token_id = [1] if self.add_eos_token else []
516
+
517
+ if token_ids_1 is None:
518
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
519
+ return (
520
+ bos_token_id
521
+ + ([0] * len(token_ids_0))
522
+ + eos_token_id
523
+ + bos_token_id
524
+ + ([0] * len(token_ids_1))
525
+ + eos_token_id
526
+ )
527
+
528
+ def create_token_type_ids_from_sequences(
529
+ self,
530
+ token_ids_0: List[int],
531
+ token_ids_1: Optional[List[int]] = None
532
+ ) -> List[int]:
533
+ """Create token type IDs from sequences."""
534
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
535
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
536
+
537
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
538
+
539
+ if token_ids_1 is not None:
540
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
541
+
542
+ return output
543
+
544
+ except ImportError:
545
+ # tokenizers library not available, Fast tokenizer not supported
546
+ IQuestCoderTokenizerFast = None
547
+ logger.info(
548
+ "The `tokenizers` library is not installed. "
549
+ "IQuestCoderTokenizerFast will not be available. "
550
+ "Install it with `pip install tokenizers`."
551
+ )
552
+
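A hedged sketch of using the slow tokenizer class directly. It assumes the script sits next to a local copy of this repository with `tokenizer.model` actually fetched (it is stored via Git LFS, see the pointer file below); the exact ids produced depend on the SentencePiece vocabulary.

```python
from tokenization_iquestcoder import IQuestCoderTokenizer

# Assumes tokenizer.model has been downloaded next to this script (Git LFS object).
tok = IQuestCoderTokenizer(vocab_file="tokenizer.model")

ids = tok.encode("def add(a, b): return a + b")
print(ids[0] == tok.bos_token_id)                 # True: add_bos_token defaults to True
print(tok.decode(ids, skip_special_tokens=True))  # should round-trip close to the input text
```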
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d3be68e090a927f31e0e378d7599b15c206dd47e4a73933775a746cc9c1cd91
3
+ size 1345108
tokenizer_config.json ADDED
@@ -0,0 +1,285 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": false,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": true,
10
+ "rstrip": false,
11
+ "single_word": true,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": true,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": true,
26
+ "rstrip": false,
27
+ "single_word": true,
28
+ "special": true
29
+ },
30
+ "75858": {
31
+ "content": "<CLS>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "75859": {
39
+ "content": "<SEP>",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false,
44
+ "special": true
45
+ },
46
+ "75860": {
47
+ "content": "<EOD>",
48
+ "lstrip": false,
49
+ "normalized": false,
50
+ "rstrip": false,
51
+ "single_word": false,
52
+ "special": true
53
+ },
54
+ "75861": {
55
+ "content": "<MASK>",
56
+ "lstrip": false,
57
+ "normalized": false,
58
+ "rstrip": false,
59
+ "single_word": false,
60
+ "special": true
61
+ },
62
+ "75862": {
63
+ "content": "<PAD>",
64
+ "lstrip": false,
65
+ "normalized": false,
66
+ "rstrip": false,
67
+ "single_word": false,
68
+ "special": true
69
+ },
70
+ "75863": {
71
+ "content": "<|im_start|>",
72
+ "lstrip": false,
73
+ "normalized": false,
74
+ "rstrip": false,
75
+ "single_word": false,
76
+ "special": true
77
+ },
78
+ "75864": {
79
+ "content": "<|im_end|>",
80
+ "lstrip": false,
81
+ "normalized": false,
82
+ "rstrip": false,
83
+ "single_word": false,
84
+ "special": true
85
+ },
86
+ "75865": {
87
+ "content": "<|fim_prefix|>",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": false,
91
+ "single_word": false,
92
+ "special": true
93
+ },
94
+ "75866": {
95
+ "content": "<|fim_middle|>",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false,
100
+ "special": true
101
+ },
102
+ "75867": {
103
+ "content": "<|fim_suffix|>",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": false,
107
+ "single_word": false,
108
+ "special": true
109
+ },
110
+ "75868": {
111
+ "content": "<|fim_pad|>",
112
+ "lstrip": false,
113
+ "normalized": false,
114
+ "rstrip": false,
115
+ "single_word": false,
116
+ "special": true
117
+ },
118
+ "75869": {
119
+ "content": "<|endoftext|>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false,
124
+ "special": true
125
+ },
126
+ "75870": {
127
+ "content": "<|repo_name|>",
128
+ "lstrip": false,
129
+ "normalized": false,
130
+ "rstrip": false,
131
+ "single_word": false,
132
+ "special": true
133
+ },
134
+ "75871": {
135
+ "content": "<|file_sep|>",
136
+ "lstrip": false,
137
+ "normalized": false,
138
+ "rstrip": false,
139
+ "single_word": false,
140
+ "special": true
141
+ },
142
+ "75872": {
143
+ "content": "<think>",
144
+ "lstrip": false,
145
+ "normalized": false,
146
+ "rstrip": false,
147
+ "single_word": false,
148
+ "special": false
149
+ },
150
+ "75873": {
151
+ "content": "</think>",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false,
156
+ "special": false
157
+ },
158
+ "75874": {
159
+ "content": "<tools>",
160
+ "lstrip": false,
161
+ "normalized": false,
162
+ "rstrip": false,
163
+ "single_word": false,
164
+ "special": false
165
+ },
166
+ "75875": {
167
+ "content": "</tools>",
168
+ "lstrip": false,
169
+ "normalized": false,
170
+ "rstrip": false,
171
+ "single_word": false,
172
+ "special": false
173
+ },
174
+ "75876": {
175
+ "content": "<tool_call>",
176
+ "lstrip": false,
177
+ "normalized": false,
178
+ "rstrip": false,
179
+ "single_word": false,
180
+ "special": false
181
+ },
182
+ "75877": {
183
+ "content": "</tool_call>",
184
+ "lstrip": false,
185
+ "normalized": false,
186
+ "rstrip": false,
187
+ "single_word": false,
188
+ "special": false
189
+ },
190
+ "75878": {
191
+ "content": "<tool_response>",
192
+ "lstrip": false,
193
+ "normalized": false,
194
+ "rstrip": false,
195
+ "single_word": false,
196
+ "special": false
197
+ },
198
+ "75879": {
199
+ "content": "</tool_response>",
200
+ "lstrip": false,
201
+ "normalized": false,
202
+ "rstrip": false,
203
+ "single_word": false,
204
+ "special": false
205
+ },
206
+ "75880": {
207
+ "content": "<|CLS|>",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false,
212
+ "special": true
213
+ },
214
+ "75881": {
215
+ "content": "<|SEP|>",
216
+ "lstrip": false,
217
+ "normalized": false,
218
+ "rstrip": false,
219
+ "single_word": false,
220
+ "special": true
221
+ },
222
+ "75882": {
223
+ "content": "<|EOD|>",
224
+ "lstrip": false,
225
+ "normalized": false,
226
+ "rstrip": false,
227
+ "single_word": false,
228
+ "special": true
229
+ },
230
+ "75883": {
231
+ "content": "<|MASK|>",
232
+ "lstrip": false,
233
+ "normalized": false,
234
+ "rstrip": false,
235
+ "single_word": false,
236
+ "special": true
237
+ },
238
+ "75884": {
239
+ "content": "<|PAD|>",
240
+ "lstrip": false,
241
+ "normalized": false,
242
+ "rstrip": false,
243
+ "single_word": false,
244
+ "special": true
245
+ }
246
+ },
247
+ "additional_special_tokens": [
248
+ "<|CLS|>",
249
+ "<|SEP|>",
250
+ "<|EOD|>",
251
+ "<|MASK|>",
252
+ "<|PAD|>",
253
+ "<|fim_prefix|>",
254
+ "<|fim_middle|>",
255
+ "<|fim_suffix|>",
256
+ "<|im_start|>",
257
+ "<|im_end|>",
258
+ "<|fim_pad|>",
259
+ "<|endoftext|>",
260
+ "<|repo_name|>",
261
+ "<|file_sep|>",
262
+ "<think>",
263
+ "</think>"
264
+ ],
265
+ "auto_map": {
266
+ "AutoTokenizer": [
267
+ "tokenization_iquestcoder.IQuestCoderTokenizer",
268
+ null
269
+ ]
270
+ },
271
+ "bos_token": "<s>",
272
+ "clean_up_tokenization_spaces": false,
273
+ "eos_token": "<|im_end|>",
274
+ "extra_special_tokens": {},
275
+ "legacy": true,
276
+ "model_max_length": 131072,
277
+ "pad_token": "<|endoftext|>",
278
+ "padding_side": "right",
279
+ "sp_model_kwargs": {},
280
+ "split_special_tokens": false,
281
+ "tokenizer_class": "IQuestCoderTokenizer",
282
+ "unk_token": "<unk>",
283
+ "use_default_system_prompt": false,
284
+ "use_fast": false
285
+ }
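`tokenizer_config.json` wires `AutoTokenizer` to the custom class through `auto_map`, so loading with `trust_remote_code=True` should pick up `IQuestCoderTokenizer` together with the special tokens configured above. A hedged sketch (the local path is a placeholder):

```python
from transformers import AutoTokenizer

# "path/to/IQuest-Coder-checkpoint" stands for a local download of this repository.
tok = AutoTokenizer.from_pretrained("path/to/IQuest-Coder-checkpoint", trust_remote_code=True)

print(tok.bos_token, tok.eos_token, tok.pad_token)  # <s> <|im_end|> <|endoftext|>
print(tok.model_max_length)                         # 131072
print(tok.convert_tokens_to_ids("<|im_start|>"))    # 75863, per added_tokens_decoder above
```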