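"""Count the total number of tokens in the takara-ai/micropajama train split under the BGE tokenizer."""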
from datasets import load_dataset
from transformers import AutoTokenizer


def main():
    ds = load_dataset("takara-ai/micropajama", split="train")
    tok = AutoTokenizer.from_pretrained("BAAI/bge-large-en-v1.5")
    # Tokenize in batches (special tokens excluded) and keep only the
    # per-example token counts, dropping all original columns.
    lens = ds.map(
        lambda b: {"len": [len(ids) for ids in tok(b["text"], add_special_tokens=False).input_ids]},
        batched=True,
        remove_columns=ds.column_names,
    )
    # Total token count across the split.
    print(sum(lens["len"]))


if __name__ == "__main__":
    main()