new tiny model
Files changed:
- README.md +1 -0
- config.json +33 -0
- make-tiny-deberta.py +164 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
README.md ADDED
@@ -0,0 +1 @@
+This is a tiny-deberta random model to be used for basic testing.
config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "architectures": [
+    "DebertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "embedding_size": 32,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 32,
+  "initializer_range": 0.02,
+  "intermediate_size": 64,
+  "layer_norm_eps": 1e-07,
+  "max_position_embeddings": 128,
+  "max_relative_positions": -1,
+  "model_type": "deberta",
+  "num_attention_heads": 2,
+  "num_hidden_layers": 2,
+  "pad_token_id": 0,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pooler_size": 32,
+  "pos_att_type": [
+    "c2p",
+    "p2c"
+  ],
+  "position_biased_input": false,
+  "relative_attention": true,
+  "torch_dtype": "float16",
+  "transformers_version": "4.9.0.dev0",
+  "type_vocab_size": 0,
+  "vocab_size": 50265
+}
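For orientation, here is a minimal sketch (not part of this commit) of how an equivalent tiny random DeBERTa could be instantiated directly from these hyperparameters with transformers; the values mirror the config above, and the parameter count is consistent with the ~3.4 MB fp16 checkpoint added below.

# Sketch only: rebuild an equivalent tiny random DeBERTa from the config values above.
from transformers import DebertaConfig, DebertaForMaskedLM

tiny_config = DebertaConfig(
    vocab_size=50265,
    hidden_size=32,
    intermediate_size=64,
    num_hidden_layers=2,
    num_attention_heads=2,
    max_position_embeddings=128,
    type_vocab_size=0,
    relative_attention=True,
    pos_att_type=["c2p", "p2c"],
    position_biased_input=False,
    pad_token_id=0,
)
tiny_model = DebertaForMaskedLM(tiny_config)
print(tiny_model.num_parameters())  # roughly 1.7M, dominated by the 50265-entry token embedding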
make-tiny-deberta.py ADDED
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+# coding: utf-8
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script creates a tiny random model
+#
+# It will then be used as "hf-internal-testing/tiny-deberta"
+
+# ***To build from scratch***
+#
+# 1. clone sentencepiece into a parent dir
+#    git clone https://github.com/google/sentencepiece
+#
+# 2. create a new repo at https://huggingface.co/new
+#    make sure to choose 'hf-internal-testing' as the Owner
+#
+# 3. clone
+#    git clone https://huggingface.co/hf-internal-testing/tiny-deberta
+#    cd tiny-deberta
+
+# 4. start with some pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos, e.g.
+#    wget https://huggingface.co/hf-internal-testing/tiny-albert/raw/main/make-tiny-albert.py
+#    chmod a+x ./make-tiny-albert.py
+#    mv ./make-tiny-albert.py ./make-tiny-deberta.py
+#
+# 5. automatically rename things from the old names to new ones
+#    perl -pi -e 's|Albert|Deberta|g' make-*
+#    perl -pi -e 's|albert|deberta|g' make-*
+#
+# 6. edit and re-run this script while fixing it up
+#    ./make-tiny-deberta.py
+#
+# 7. add/commit/push
+#    git add *
+#    git commit -m "new tiny model"
+#    git push
+
+# ***To update***
+#
+# 1. clone the existing repo
+#    git clone https://huggingface.co/hf-internal-testing/tiny-deberta
+#    cd tiny-deberta
+#
+# 2. edit and re-run this script after doing whatever changes are needed
+#    ./make-tiny-deberta.py
+#
+# 3. commit/push
+#    git commit -m "new tiny model"
+#    git push
+
+import sys
+import os
+
+# workaround for fast tokenizer protobuf issue, and it's much faster too!
+os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
+
+from transformers import DebertaTokenizer, DebertaTokenizerFast, DebertaConfig, DebertaForMaskedLM
+
+mname_orig = "microsoft/deberta-base"
+mname_tiny = "tiny-deberta"
+
+
+### Tokenizer
+
+
+# XXX: can't figure out how to shrink this tokenizer's vocab! Help?
+
+# # Shrink the orig vocab to keep things small (just enough to tokenize any word, so letters+symbols)
+# # DebertaTokenizerFast is fully defined by a tokenizer.json, which contains the vocab and the ids, so we just need to truncate it wisely
+# import subprocess
+# tokenizer_fast = DebertaTokenizerFast.from_pretrained(mname_orig)
+# vocab_keep_items = 50265
+# tmp_dir = f"/tmp/{mname_tiny}"
+# tokenizer_fast.save_pretrained(tmp_dir)
+
+# # resize tokenizer.json (vocab.txt will be automatically resized on save_pretrained)
+# # perl -pi -e 's|(2999).*|$1}}}|' tokenizer.json # 0-indexed, so vocab_keep_items-1!
+# closing_pat = "}}}"
+# cmd = (f"perl -pi -e s|({vocab_keep_items-1}).*|$1{closing_pat}| {tmp_dir}/tokenizer.json").split()
+# result = subprocess.run(cmd, capture_output=True, text=True)
+# # reload with modified tokenizer
+# tokenizer_fast_tiny = DebertaTokenizerFast.from_pretrained(tmp_dir)
+# # it seems that DebertaTokenizer is not needed and DebertaTokenizerFast does the job
+
+
+# # Shrink the orig vocab to keep things small (just enough to tokenize any word, so letters+symbols)
+# # ElectraTokenizerFast is fully defined by a tokenizer.json, which contains the vocab and the ids, so we just need to truncate it wisely
+# import subprocess
+# tokenizer_fast = DebertaTokenizerFast.from_pretrained(mname_orig)
+# vocab_keep_items = 5120
+# tmp_dir = f"/tmp/{mname_tiny}"
+# vocab_short_path = f"{tmp_dir}/vocab.json"
+# tokenizer_fast.save_pretrained(tmp_dir)
+# # resize tokenizer.json (vocab.txt will be automatically resized on save_pretrained)
+# # perl -pi -e 's|(2999).*|$1}}}|' tokenizer.json # 0-indexed, so vocab_keep_items-1!
+# closing_pat = "}"
+# cmd = (f"perl -pi -e s|({vocab_keep_items-1}).*|$1{closing_pat}| {tmp_dir}/vocab.json").split()
+# result = subprocess.run(cmd, capture_output=True, text=True)
+# # reload with modified tokenizer
+# #tokenizer_fast_tiny = DebertaTokenizerFast.from_pretrained(tmp_dir, vocab_file=vocab_short_path)
+# # it seems that ElectraTokenizer is not needed and ElectraTokenizerFast does the job
+
+
+# using full tokenizer for now
+tokenizer_fast_tiny = DebertaTokenizerFast.from_pretrained(mname_orig)
+
+
+
+
+
+### Config
+
+config_tiny = DebertaConfig.from_pretrained(mname_orig)
+print(config_tiny)
+# remember to update this to the actual config as each model is different and then shrink the numbers
+config_tiny.update(dict(
+    #vocab_size=vocab_keep_items,
+    embedding_size=32,
+    pooler_size=32,
+    hidden_size=32,
+    intermediate_size=64,
+    max_position_embeddings=128,
+    num_attention_heads=2,
+    num_hidden_layers=2,
+))
+print("New config", config_tiny)
+
+### Model
+
+model_tiny = DebertaForMaskedLM(config_tiny)
+print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
+model_tiny.resize_token_embeddings(len(tokenizer_fast_tiny))
+
+# Test
+inputs = tokenizer_fast_tiny("The capital of France is [MASK].", return_tensors="pt")
+#print(inputs)
+outputs = model_tiny(**inputs)
+print("Test with normal tokenizer:", len(outputs.logits[0]))
+
+# Save
+model_tiny.half() # makes it smaller
+model_tiny.save_pretrained(".")
+tokenizer_fast_tiny.save_pretrained(".")
+
+#print(model_tiny)
+
+readme = "README.md"
+if not os.path.exists(readme):
+    with open(readme, "w") as f:
+        f.write(f"This is a {mname_tiny} random model to be used for basic testing.\n")
+
+print(f"Generated {mname_tiny}")
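Once the checkpoint is pushed, test code usually consumes it by repo name rather than re-running this script. A minimal usage sketch (not part of the commit), assuming the model is published as hf-internal-testing/tiny-deberta as the build notes above describe:

# Sketch only: load the published tiny random checkpoint the way a test would.
from transformers import AutoModelForMaskedLM, AutoTokenizer

repo = "hf-internal-testing/tiny-deberta"  # assumed final location, per the comments above
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForMaskedLM.from_pretrained(repo)

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
logits = model(**inputs).logits
print(logits.shape)  # (1, sequence_length, 50265); the weights are random, so predictions are meaningless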
merges.txt ADDED
The diff for this file is too large to render.
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3a7c1679f1892c270c43d7ee3e37245c7f4e619179379bdaa313bcd05f46210
+size 3395431
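The three lines above are a Git LFS pointer rather than the weights themselves; the real file is fetched with git lfs pull. A small verification sketch (assumption: pytorch_model.bin has already been materialized locally):

# Sketch only: check a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "pytorch_model.bin"
print(os.path.getsize(path))  # expected: 3395431

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
print(sha.hexdigest())  # expected: d3a7c1679f1892c270c43d7ee3e37245c7f4e619179379bdaa313bcd05f46210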
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "[CLS]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "[SEP]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "[UNK]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "[SEP]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "[PAD]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "[CLS]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": {"content": "[UNK]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "[CLS]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "[SEP]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "[SEP]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "[CLS]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "[PAD]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "do_lower_case": false, "vocab_type": "gpt2", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "microsoft/deberta-base", "tokenizer_class": "DebertaTokenizer"}
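A quick sketch (not part of the commit) confirming that the special tokens and limits declared in special_tokens_map.json and tokenizer_config.json above round-trip through loading; it assumes the repo files sit in the current directory, as after the script's save_pretrained("."):

# Sketch only: the declared special tokens should be what the loaded tokenizer reports.
from transformers import DebertaTokenizerFast

tokenizer = DebertaTokenizerFast.from_pretrained(".")
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token, tokenizer.unk_token, tokenizer.mask_token)
# expected: [CLS] [SEP] [PAD] [UNK] [MASK]
print(tokenizer.model_max_length)  # expected: 512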
vocab.json ADDED
The diff for this file is too large to render.