import os
import textwrap
from pylate import models, indexes

# === Config ===
MODEL_PATH = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M"
DOCS_FOLDER = "huuli_docs"          # Folder with your .txt files
INDEX_FOLDER = "huuli_index_storage" # Where index shards are stored
INDEX_NAME = "huuli_index"           # Index label
CHUNK_CHARS = 800                    # ~characters per chunk
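# NOTE: chunks are measured in characters here. pylate's ColBERT models also
# truncate documents to a fixed token budget (the model's document_length),
# so keep CHUNK_CHARS small enough that a chunk still fits within that limit.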


def load_documents(folder_path=DOCS_FOLDER, chunk_chars=CHUNK_CHARS):
    """
    Read all .txt files in folder_path and split them into chunks.

    Returns:
        ids:      ["file1.txt__chunk_0", ...]
        docs:     ["chunk text...", ...]
        doc_map:  {id: "chunk text"}
    """
    docs, ids, doc_map = [], [], {}

    if not os.path.isdir(folder_path):
        raise FileNotFoundError(f"Folder not found: {folder_path}")

    for fname in sorted(os.listdir(folder_path)):
        if not fname.endswith(".txt"):
            continue
        full_path = os.path.join(folder_path, fname)
        with open(full_path, "r", encoding="utf-8") as f:
            raw_text = f.read().strip()

        # Split long files into reasonably sized character chunks.
        # textwrap.wrap keeps things simple; adjust as you like.
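        # NOTE: with default settings textwrap.wrap also collapses newlines
        # and runs of whitespace into single spaces (replace_whitespace=True).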
        chunks = textwrap.wrap(raw_text, width=chunk_chars, break_long_words=False)

        for i, chunk in enumerate(chunks):
            doc_id = f"{fname}__chunk_{i}"
            ids.append(doc_id)
            docs.append(chunk)
            doc_map[doc_id] = chunk

    return ids, docs, doc_map


def build_index(doc_ids, doc_texts,
                model_path=MODEL_PATH,
                index_folder=INDEX_FOLDER,
                index_name=INDEX_NAME,
                override=True):
    """
    Load ColBERT, create a PLAID index, encode docs, and write to index.
    """
    print("[1] Loading model...")
    model = models.ColBERT(model_name_or_path=model_path)
    # Safety: some tokenizers need a pad token
    if getattr(model, "tokenizer", None) and model.tokenizer.pad_token is None:
        model.tokenizer.pad_token = model.tokenizer.eos_token

    print("[2] Creating PLAID index...")
    index = indexes.PLAID(
        index_folder=index_folder,
        index_name=index_name,
        override=override,  # True: rebuild; False: append if supported
    )
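    # NOTE: PLAID builds centroids by clustering the document embeddings, so
    # very small corpora may cluster poorly; pylate's simpler Voyager index
    # can be swapped in here if the collection is tiny.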

    print("[3] Encoding documents...")
    doc_embeddings = model.encode(
        doc_texts,
        batch_size=16,
        is_query=False,        # document side
        show_progress_bar=True,
    )

    print("[4] Writing to index...")
    index.add_documents(
        documents_ids=doc_ids,
        documents_embeddings=doc_embeddings,
    )

    print("✅ Index build complete.")
    return model, index


if __name__ == "__main__":
    print("📚 Loading and chunking documents...")
    doc_ids, doc_texts, _ = load_documents(DOCS_FOLDER, CHUNK_CHARS)

    print("🧱 Building index...")
    build_index(doc_ids, doc_texts,
                model_path=MODEL_PATH,
                index_folder=INDEX_FOLDER,
                index_name=INDEX_NAME,
                override=True)
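

# --- Optional: querying the index (a minimal sketch, not part of the build) ---
# Assumes pylate's standard retrieve.ColBERT API; reuse the model and index
# returned by build_index() instead of reloading them.
#
#   from pylate import retrieve
#
#   retriever = retrieve.ColBERT(index=index)
#   query_embeddings = model.encode(["your query here"], is_query=True)
#   results = retriever.retrieve(queries_embeddings=query_embeddings, k=5)
#   # results[0] -> list of {"id": ..., "score": ...} hits for the first query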