# LFM2-ColBERT-350M / indexer.py
import os
import textwrap
from pylate import indexes, models

# === Config ===
MODEL_PATH = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M"
DOCS_FOLDER = "huuli_docs" # Folder with your .txt files
INDEX_FOLDER = "huuli_index_storage" # Where index shards are stored
INDEX_NAME = "huuli_index" # Index label
CHUNK_CHARS = 800 # ~characters per chunk
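
# Expected on-disk layout (illustrative; file names are made up):
#   huuli_docs/
#       law_part_1.txt
#       law_part_2.txt

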
def load_documents(folder_path=DOCS_FOLDER, chunk_chars=CHUNK_CHARS):
"""
Read all .txt files in folder_path and split them into chunks.
Returns:
ids: ["file1.txt__chunk_0", ...]
docs: ["chunk text...", ...]
doc_map: {id: "chunk text"}
"""
    ids, docs, doc_map = [], [], {}
if not os.path.isdir(folder_path):
raise FileNotFoundError(f"Folder not found: {folder_path}")
for fname in sorted(os.listdir(folder_path)):
if not fname.endswith(".txt"):
continue
full_path = os.path.join(folder_path, fname)
with open(full_path, "r", encoding="utf-8") as f:
raw_text = f.read().strip()
        # Split long files into roughly chunk_chars-sized pieces.
        # Note: textwrap.wrap replaces newlines with spaces, so paragraph
        # breaks are lost; swap in a custom splitter if boundaries matter.
        chunks = textwrap.wrap(raw_text, width=chunk_chars, break_long_words=False)
for i, chunk in enumerate(chunks):
doc_id = f"{fname}__chunk_{i}"
ids.append(doc_id)
docs.append(chunk)
doc_map[doc_id] = chunk
return ids, docs, doc_map
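
# Quick sanity check (illustrative usage; not run by this script):
#   ids, docs, doc_map = load_documents("huuli_docs", 800)
#   print(ids[0])              # e.g. "some_file.txt__chunk_0"
#   print(len(docs), "chunks")

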
def build_index(doc_ids, doc_texts,
model_path=MODEL_PATH,
index_folder=INDEX_FOLDER,
index_name=INDEX_NAME,
override=True):
"""
Load ColBERT, create a PLAID index, encode docs, and write to index.
"""
print("[1] Loading model...")
model = models.ColBERT(model_name_or_path=model_path)
# Safety: some tokenizers need a pad token
if getattr(model, "tokenizer", None) and model.tokenizer.pad_token is None:
model.tokenizer.pad_token = model.tokenizer.eos_token
print("[2] Creating PLAID index...")
index = indexes.PLAID(
index_folder=index_folder,
index_name=index_name,
override=override, # True: rebuild; False: append if supported
)
print("[3] Encoding documents...")
doc_embeddings = model.encode(
doc_texts,
batch_size=16,
is_query=False, # document side
show_progress_bar=True,
)
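    # Note: for documents, pylate returns one token-level embedding matrix per
    # chunk (roughly num_tokens x embedding_dim); ColBERT scores via late
    # interaction over these token embeddings rather than a single vector.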
print("[4] Writing to index...")
index.add_documents(
documents_ids=doc_ids,
documents_embeddings=doc_embeddings,
)
print("✅ Index build complete.")
return model, index


if __name__ == "__main__":
print("📚 Loading and chunking documents...")
doc_ids, doc_texts, _ = load_documents(DOCS_FOLDER, CHUNK_CHARS)
print("🧱 Building index...")
    # Keep the returned handles so the retrieval sketch below can use them.
    model, index = build_index(doc_ids, doc_texts,
                               model_path=MODEL_PATH,
                               index_folder=INDEX_FOLDER,
                               index_name=INDEX_NAME,
                               override=True)
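
    # --- Querying the index: a minimal sketch (not executed here) ---
    # pylate's retrieve module can search the index built above; the query
    # string and k value are illustrative placeholders.
    #
    #   from pylate import retrieve
    #
    #   retriever = retrieve.ColBERT(index=index)
    #   query_embeddings = model.encode(
    #       ["your query here"],
    #       batch_size=16,
    #       is_query=True,            # query side
    #       show_progress_bar=False,
    #   )
    #   results = retriever.retrieve(queries_embeddings=query_embeddings, k=5)
    #   # results[0] is a ranked list of {"id": ..., "score": ...} entries
    #   # whose ids map back to doc_map from load_documents().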