ManuelZnnmc nehcgs committed on
Commit
08d3387
·
0 Parent(s):

Duplicate from katanemo/Arch-Router-1.5B

Browse files

Co-authored-by: Shuguang Chen <nehcgs@users.noreply.huggingface.co>

Files changed (10) hide show
  1. .gitattributes +36 -0
  2. LICENSE +97 -0
  3. README.md +172 -0
  4. config.json +28 -0
  5. generation_config.json +14 -0
  6. merges.txt +0 -0
  7. model.safetensors +3 -0
  8. tokenizer.json +3 -0
  9. tokenizer_config.json +209 -0
  10. vocab.json +0 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT
2
+ **Version Release Date:** November 15th, 2025
3
+
4
+ This Katanemo Labs, Inc. COMMUNITY LICENSE AGREEMENT is based on the Llama 3.2 Community License, Copyright © Meta Platforms, Inc. The terms and conditions have been adapted to reflect the proprietary nature of Katanemo Labs' materials.
5
+
6
+ ---
7
+
8
+ 1. Definitions
9
+ a. "Agreement": The terms and conditions for use, reproduction, distribution, and modification of the Katanemo Materials set forth herein.
10
+ b. "Documentation": The specifications, manuals, and documentation accompanying Katanemo LLMs v1.
11
+ c. "Licensee" or "you": The individual or entity entering into this Agreement, including your employer if you are acting on their behalf.
12
+ d. "Katanemo": The foundational large language models and software provided by Katanemo Labs, Inc., available at https://huggingface.co/katanemolabs.
13
+ e. "Katanemo Materials": Collectively, Katanemo's proprietary models and Documentation. Some Materials are derived from the Qwen language models licensed under the Qwen RESEARCH LICENSE AGREEMENT.
14
+ f. "Katanemo Labs" or "we": Katanemo Labs Inc., a Delaware, USA Corporation.
15
+
16
+ ---
17
+
18
+ 2. ACCEPTANCE OF TERMS
19
+ By clicking "I Accept" or using any part of the Katanemo Materials, you agree to be bound by this Agreement.
20
+
21
+ ---
22
+
23
+ 3. LICENSE RIGHTS AND REDISTRIBUTION
24
+ a. Grant of Rights
25
+ Subject to the restrictions in Section 4, you are granted a non-exclusive, worldwide, non-transferable, and royalty-free license to:
26
+ - Use, reproduce, distribute, and modify the Katanemo Materials.
27
+ - Create derivative works based on the Katanemo Materials.
28
+
29
+ b. Permitted Redistribution
30
+ If you distribute the Katanemo Materials or any derivative work:
31
+ - You must include a copy of this License.
32
+ - You must prominently display the notice “Built with Katanemo” on a related website or documentation.
33
+
34
+ c. Attribution Requirement
35
+ You must include the following attribution notice in any distributed or public-facing use:
36
+ "Katanemo is licensed under the Katanemo Labs Community License.
37
+ Copyright © Katanemo Labs, Inc. All Rights Reserved."
38
+
39
+ d. Compliance Requirement
40
+ All use of the Katanemo Materials must comply with the Acceptable Use Policy, available at: https://katanemo.com/use-policy
41
+
42
+ ---
43
+
44
+ 4. COMMERCIAL USE AND DISTRIBUTION RESTRICTIONS
45
+
46
+ You may use, reproduce, modify, distribute, and create derivative works from the Katanemo Materials for any purpose, including commercial use, EXCEPT in the following cases:
47
+
48
+ You may NOT package, distribute, or make available the Katanemo Materials as part of:
49
+ - A framework,
50
+ - A proxy server,
51
+ - Middleware,
52
+ - A gateway infrastructure product,
53
+ - Or any product substantially similar in function or purpose to the above,
54
+
55
+ unless you obtain a separate commercial license from Katanemo Labs.
56
+
57
+ **Exception for Katanemo-integrated products.**
58
+ You do not need a separate commercial license when you use the Arch-Router model (or other Katanemo models) **only through a Katanemo product that provides an integrated experience and is itself licensed under Apache 2.0** (for example, the Arch Gateway project at https://github.com/katanemo/archgw), as long as:
59
+
60
+ 1. The model is called only through that integrated product experience and is **not** exposed or offered as a separate framework, proxy, middleware, gateway infrastructure product, or similar service; and
61
+ 2. For Arch Gateway (archgw), any LLM traffic using the Arch-Router model is proxied through Arch Gateway’s data plane.
62
+
63
+ This exception applies only to use of the Katanemo Materials through such Katanemo-provided integrated products. It does **not** permit you to repackage or redistribute the Katanemo Materials as your own framework, proxy server, middleware, gateway infrastructure product, or any substantially similar product.
64
+
65
+ ---
66
+
67
+ This license does not grant trademark rights or rights outside the scope described above.
68
+
69
+ 5. Disclaimer of Warranty
70
+ The Katanemo Materials are provided "AS IS" without warranties of any kind, either express or implied, including but not limited to warranties of title, non-infringement, or fitness for a particular purpose.
71
+
72
+ ---
73
+
74
+ 6. Limitation of Liability
75
+ Katanemo Labs is not liable for any indirect, special, or consequential damages arising out of the use of the Katanemo Materials, even if advised of the possibility of such damages.
76
+
77
+ ---
78
+
79
+ 7. Intellectual Property
80
+ a. Trademarks
81
+ No trademark licenses are granted, except as required for attribution as described in Section 3.c. You may use the “Katanemo” mark according to Katanemo Labs' brand guidelines.
82
+
83
+ b. Ownership
84
+ You own any derivative works or modifications you create, except for portions owned by Katanemo Labs.
85
+
86
+ c. Litigation
87
+ If you file a lawsuit against Katanemo Labs regarding intellectual property, your license under this Agreement terminates.
88
+
89
+ ---
90
+
91
+ 8. Term and Termination
92
+ This Agreement continues until terminated. Katanemo Labs may terminate the Agreement if you breach any terms. Upon termination, you must cease using the Katanemo Materials.
93
+
94
+ ---
95
+
96
+ 9. Governing Law and Jurisdiction
97
+ This Agreement is governed by the laws of the State of Washington, USA. Any disputes will be resolved in the courts of California.
README.md ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model:
3
+ - Qwen/Qwen2.5-1.5B-Instruct
4
+ language:
5
+ - en
6
+ library_name: transformers
7
+ license: other
8
+ license_name: katanemo-research
9
+ license_link: https://huggingface.co/katanemo/Arch-Router-1.5B/blob/main/LICENSE
10
+ pipeline_tag: text-generation
11
+ tags:
12
+ - routing
13
+ - preference
14
+ - arxiv:2506.16655
15
+ - llm
16
+ paper: https://arxiv.org/abs/2506.16655
17
+ ---
18
+
19
+ # katanemo/Arch-Router-1.5B
20
+
21
+ ## Overview
22
+ With the rapid proliferation of large language models (LLMs) -- each optimized for different strengths, style, or latency/cost profile -- routing has become an essential technique to operationalize the use of different models. However, existing LLM routing approaches are limited in two key ways: they evaluate performance using benchmarks that often fail to capture human preferences driven by subjective evaluation criteria, and they typically select from a limited pool of models.
23
+
24
+ We introduce a preference-aligned routing framework that guides model selection by matching queries to user-defined domains (e.g., travel) or action types (e.g., image editing) -- offering a practical mechanism to encode preferences in routing decisions. Specifically, we introduce Arch-Router, a compact 1.5B model that learns to map queries to domain-action preferences for model routing decisions. Experiments on conversational datasets demonstrate that our approach achieves state-of-the-art (SOTA) results in matching queries with human preferences, outperforming top proprietary models.
25
+
26
+ This model is described in the paper: https://arxiv.org/abs/2506.16655, and powers [Arch](https://github.com/katanemo/arch), the models-native proxy server for agents.
27
+
28
+ ### How It Works
29
+
30
+ To support effective routing, Arch-Router introduces two key concepts:
31
+ - **Domain** – the high-level thematic category or subject matter of a request (e.g., legal, healthcare, programming).
32
+ - **Action** – the specific type of operation the user wants performed (e.g., summarization, code generation, booking appointment, translation).
33
+
34
+ Both domain and action configs are associated with preferred models or model variants. At inference time, Arch-Router analyzes the incoming prompt to infer its domain and action using semantic similarity, task indicators, and contextual cues. It then applies the user-defined routing preferences to select the model best suited to handle the request.
35
+
36
+ ### Key Features
37
+
38
+ - **Structured Preference Routing**: Aligns prompt request with model strengths using explicit domain–action mappings.
39
+ - **Transparent and Controllable**: Makes routing decisions transparent and configurable, empowering users to customize system behavior.
40
+ - **Flexible and Adaptive**: Supports evolving user needs, model updates, and new domains/actions without retraining the router.
41
+ - **Production-Ready Performance**: Optimized for low-latency, high-throughput applications in multi-model environments.
42
+
43
+ # Requirements
44
+ The code of Arch-Router-1.5B has been included in the Hugging Face `transformers` library, and we advise you to install the latest version:
45
+ ```bash
46
+ pip install transformers>=4.37.0
47
+ ```
48
+
49
+ # How to use
50
+ We use the following example to illustrate how to use our model to perform routing tasks. Please note that our model works best with our provided prompt format.
51
+ ### Quickstart
52
+ ````python
53
+ import json
54
+ from typing import Any, Dict, List
55
+ from transformers import AutoModelForCausalLM, AutoTokenizer
56
+
57
+ model_name = "katanemo/Arch-Router-1.5B"
58
+ model = AutoModelForCausalLM.from_pretrained(
59
+ model_name, device_map="auto", torch_dtype="auto", trust_remote_code=True
60
+ )
61
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
62
+
63
+ # Please use our provided prompt for best performance
64
+ TASK_INSTRUCTION = """
65
+ You are a helpful assistant designed to find the best suited route.
66
+ You are provided with route description within <routes></routes> XML tags:
67
+ <routes>
68
+
69
+ {routes}
70
+
71
+ </routes>
72
+
73
+ <conversation>
74
+
75
+ {conversation}
76
+
77
+ </conversation>
78
+ """
79
+
80
+ FORMAT_PROMPT = """
81
+ Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
82
+ 1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
83
+ 2. You must analyze the route descriptions and find the best match route for user latest intent.
84
+ 3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.
85
+
86
+ Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
87
+ {"route": "route_name"}
88
+ """
89
+
90
+ # Define route config
91
+ route_config = [
92
+ {
93
+ "name": "code_generation",
94
+ "description": "Generating new code snippets, functions, or boilerplate based on user prompts or requirements",
95
+ },
96
+ {
97
+ "name": "bug_fixing",
98
+ "description": "Identifying and fixing errors or bugs in the provided code across different programming languages",
99
+ },
100
+ {
101
+ "name": "performance_optimization",
102
+ "description": "Suggesting improvements to make code more efficient, readable, or scalable",
103
+ },
104
+ {
105
+ "name": "api_help",
106
+ "description": "Assisting with understanding or integrating external APIs and libraries",
107
+ },
108
+ {
109
+ "name": "programming",
110
+ "description": "Answering general programming questions, theory, or best practices",
111
+ },
112
+ ]
113
+
114
+ # Helper function to create the system prompt for our model
115
+ def format_prompt(
116
+ route_config: List[Dict[str, Any]], conversation: List[Dict[str, Any]]
117
+ ):
118
+ return (
119
+ TASK_INSTRUCTION.format(
120
+ routes=json.dumps(route_config), conversation=json.dumps(conversation)
121
+ )
122
+ + FORMAT_PROMPT
123
+ )
124
+
125
+ # Define conversations
126
+
127
+ conversation = [
128
+ {
129
+ "role": "user",
130
+ "content": "fix this module 'torch.utils._pytree' has no attribute 'register_pytree_node'. did you mean: '_register_pytree_node'?",
131
+ }
132
+ ]
133
+
134
+ route_prompt = format_prompt(route_config, conversation)
135
+
136
+ messages = [
137
+ {"role": "user", "content": route_prompt},
138
+ ]
139
+
140
+ input_ids = tokenizer.apply_chat_template(
141
+ messages, add_generation_prompt=True, return_tensors="pt"
142
+ ).to(model.device)
143
+
144
+ # 2. Generate
145
+ generated_ids = model.generate(
146
+ input_ids=input_ids, # or just positional: model.generate(input_ids, …)
147
+ max_new_tokens=32768,
148
+ )
149
+
150
+ # 3. Strip the prompt from each sequence
151
+ prompt_lengths = input_ids.shape[1] # same length for every row here
152
+ generated_only = [
153
+ output_ids[prompt_lengths:] # slice off the prompt tokens
154
+ for output_ids in generated_ids
155
+ ]
156
+
157
+ # 4. Decode if you want text
158
+ response = tokenizer.batch_decode(generated_only, skip_special_tokens=True)[0]
159
+ print(response)
160
+ ````
161
+
162
+ Then you should be able to see the following output string in JSON format:
163
+ ````python
164
+ {"route": "bug_fixing"}
165
+ ````
166
+
167
+ To better understand how to create the route descriptions, please take a look at our [Katanemo API](https://docs.archgw.com/guides/llm_router.html).
168
+
169
+ # License
170
+ Katanemo Arch-Router model is distributed under the [Katanemo license](https://huggingface.co/katanemo/Arch-Router-1.5B/blob/main/LICENSE).
171
+
172
+ GitHub: https://github.com/katanemo/arch
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Qwen2ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151645,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 1536,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 8960,
12
+ "max_position_embeddings": 32768,
13
+ "max_window_layers": 21,
14
+ "model_type": "qwen2",
15
+ "num_attention_heads": 12,
16
+ "num_hidden_layers": 28,
17
+ "num_key_value_heads": 2,
18
+ "rms_norm_eps": 1e-06,
19
+ "rope_scaling": null,
20
+ "rope_theta": 1000000.0,
21
+ "sliding_window": 32768,
22
+ "tie_word_embeddings": true,
23
+ "torch_dtype": "float16",
24
+ "transformers_version": "4.51.3",
25
+ "use_cache": true,
26
+ "use_sliding_window": false,
27
+ "vocab_size": 151936
28
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "repetition_penalty": 1.1,
10
+ "temperature": 0.7,
11
+ "top_k": 20,
12
+ "top_p": 0.8,
13
+ "transformers_version": "4.51.3"
14
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9395579ce8f703f1812ff56215b5fb8a421dd11040c357c0b371b55ea922e5c9
3
+ size 3087466808
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
3
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. 
You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199
+ "clean_up_tokenization_spaces": false,
200
+ "eos_token": "<|im_end|>",
201
+ "errors": "replace",
202
+ "extra_special_tokens": {},
203
+ "model_max_length": 131072,
204
+ "pad_token": "<|endoftext|>",
205
+ "padding_side": "left",
206
+ "split_special_tokens": false,
207
+ "tokenizer_class": "Qwen2Tokenizer",
208
+ "unk_token": null
209
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff