WhoIsShe committed on
Commit
52855b8
·
verified ·
1 Parent(s): 420ef66

Add MNN 4-bit quantized model with Model Card

Browse files
Files changed (10) hide show
  1. .gitattributes +2 -0
  2. README.md +43 -0
  3. config.json +10 -0
  4. embeddings_bf16.bin +3 -0
  5. export_args.json +41 -0
  6. llm.mnn +3 -0
  7. llm.mnn.json +0 -0
  8. llm.mnn.weight +3 -0
  9. llm_config.json +11 -0
  10. tokenizer.txt +0 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ llm.mnn filter=lfs diff=lfs merge=lfs -text
37
+ llm.mnn.weight filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: other
5
+ tags:
6
+ - mnn
7
+ - on-device
8
+ - android
9
+ - ios
10
+ - quantization
11
+ - int4
12
+ - text-generation
13
+ - qwen3
14
+ pipeline_tag: text-generation
15
+ library_name: mnn
16
+ base_model: ArliAI/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small
17
+ ---
18
+
19
+ # ArliAI/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small (MNN Quantized)
20
+
21
+ ## Original model:
22
+
23
+ * **https://huggingface.co/ArliAI/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small**
24
+
25
+
26
+ This is a **4-bit quantized** version of ArliAI/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small, optimized for **on-device inference** (Android/iOS) using the [Alibaba MNN framework](https://github.com/alibaba/MNN).
27
+
28
+ ## 🚀 Fast Deployment on Android
29
+
30
+ ### 1. Download the App
31
+ Don't build from scratch! Use the official MNN Chat Android app:
32
+ * **[Download APK (GitHub)](https://github.com/alibaba/MNN/releases)**
33
+
34
+ ### 2. Setup
35
+ 1. Download the files from this repo (`llm.mnn`, `llm.mnn.weight`, `config.json`).
36
+ 2. Create a folder on your phone: `/sdcard/MNN/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small`.
37
+ 3. Copy the files into that folder.
38
+ 4. Open the MNN App and select your folder.
39
+
40
+ ## 💻 Technical Details
41
+ * **Framework:** MNN
42
+ * **Quantization:** 4-bit Asymmetric (Int4)
43
+ * **Model Type:** Qwen3-8B (Uncensored)
config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "llm_model": "llm.mnn",
3
+ "llm_weight": "llm.mnn.weight",
4
+ "backend_type": "cpu",
5
+ "thread_num": 4,
6
+ "precision": "low",
7
+ "memory": "low",
8
+ "sampler_type": "penalty",
9
+ "penalty": 1.1
10
+ }
embeddings_bf16.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b82f6cd09fcf5ed13fb7e2a526c40c6feee98bde3b15cb07c3703b84718e0ef3
3
+ size 1244659712
export_args.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "path": "/content/models/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small",
3
+ "type": null,
4
+ "tokenizer_path": "/content/models/DS-R1-Qwen3-8B-ArliAI-RpR-v4-Small",
5
+ "eagle_path": null,
6
+ "lora_path": null,
7
+ "gptq_path": null,
8
+ "dst_path": "/kaggle/working/mnn_output",
9
+ "verbose": true,
10
+ "test": null,
11
+ "export": "mnn",
12
+ "onnx_slim": false,
13
+ "quant_bit": 4,
14
+ "quant_block": 64,
15
+ "visual_quant_bit": null,
16
+ "visual_quant_block": null,
17
+ "lm_quant_bit": 4,
18
+ "lm_quant_block": 64,
19
+ "mnnconvert": "../../../build/MNNConvert",
20
+ "ppl": false,
21
+ "awq": false,
22
+ "hqq": false,
23
+ "omni": false,
24
+ "transformer_fuse": false,
25
+ "group_conv_native": false,
26
+ "smooth": false,
27
+ "sym": false,
28
+ "visual_sym": false,
29
+ "seperate_embed": false,
30
+ "lora_split": false,
31
+ "calib_data": null,
32
+ "act_bit": 16,
33
+ "embed_bit": 16,
34
+ "act_sym": false,
35
+ "generate_for_npu": false,
36
+ "skip_weight": false,
37
+ "omni_epochs": 20,
38
+ "omni_lr": 0.005,
39
+ "omni_wd": 0.0001,
40
+ "tie_word_embeddings": false
41
+ }
llm.mnn ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99988403281b7b7e24044376a83fddd4cee4c3775c0a53e70dcbb7127c66a871
3
+ size 702248
llm.mnn.json ADDED
The diff for this file is too large to render. See raw diff
 
llm.mnn.weight ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d8873e0f3789636eb4f2a3ca8d127daeed543380b14c8d2022780bedfa309b0
3
+ size 4732532162
llm_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "qwen3",
3
+ "hidden_size": 4096,
4
+ "attention_mask": "float",
5
+ "attention_type": "full",
6
+ "jinja": {
7
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true, is_last_user=false) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{ bos_token }}{{ ns.system_prompt }}{%- for message in messages %}{% set content = message['content'] %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{%- set ns.is_first = false -%}{%- set ns.is_last_user = true -%}{{'<|User|>' + content + '<|Assistant|>'}}{%- endif %}{%- if message['role'] == 'assistant' %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{% endif %}{%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}{%- set ns.is_last_user = false -%}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{%- endif %}{%- set ns.is_first = false %}{%- set ns.is_tool = false -%}{%- set ns.is_output_first = true %}{%- for tool in message['tool_calls'] %}{%- if not ns.is_first %}{%- if content is none %}{{'<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- else %}{{content + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + 
tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- endfor %}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none)%}{%- set ns.is_last_user = false -%}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + content + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{{content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_last_user = false -%}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + content + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + content + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_last_user and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}",
8
+ "bos": "<|begin▁of▁sentence|>",
9
+ "eos": "<|end▁of▁sentence|>"
10
+ }
11
+ }
tokenizer.txt ADDED
The diff for this file is too large to render. See raw diff