import os
import textwrap

import torch
from fastapi import FastAPI
from pydantic import BaseModel
from pylate import models, indexes, retrieve

# Paths for the local ColBERT checkpoint and the legal-document corpus,
# plus the PLAID index location and the chunk size used when splitting files.
MODEL_PATH = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M"
DOCS_DIR = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M/merged_huuli_docs"
INDEX_FOLDER = "mergedhuuli_index_docs"
INDEX_NAME = "mergedhuuli_index_v1"
CHUNK_CHARS = 800


def load_documents(folder_path=DOCS_DIR, chunk_chars=CHUNK_CHARS):
    """Read every .txt file in folder_path and split it into fixed-size character chunks."""
    if not os.path.isdir(folder_path):
        raise FileNotFoundError(f"Document folder not found: {os.path.abspath(folder_path)}")

    docs, ids, doc_map = [], [], {}
    file_count = 0
    total_chunks = 0

    print(f"[INIT] load_documents: folder={folder_path}, CHUNK_CHARS={chunk_chars}")

    for fname in os.listdir(folder_path):
        if not fname.endswith(".txt"):
            continue
        file_count += 1
        full_path = os.path.join(folder_path, fname)
        with open(full_path, "r", encoding="utf-8") as f:
            raw_text = f.read().strip()

        # textwrap.wrap returns [] for empty or whitespace-only text, so keep at least one chunk.
        chunks = textwrap.wrap(raw_text, width=chunk_chars) or [raw_text]

        print(f"[INIT] - File: {fname}, total length={len(raw_text)} chars, chunks={len(chunks)}")

        for i, chunk in enumerate(chunks):
            doc_id = f"{fname}__chunk_{i}"
            ids.append(doc_id)
            docs.append(chunk)
            doc_map[doc_id] = chunk
            total_chunks += 1

    if file_count == 0:
        raise RuntimeError(f"No .txt files found in '{folder_path}'.")

    print(f"[INIT] Loaded {file_count} files, {total_chunks} chunks.")
    print(f"[INIT] First 5 chunk IDs: {ids[:5]}")

    return ids, docs, doc_map


def build_index(doc_ids, doc_texts):
    """Load the ColBERT model, encode every chunk, and write the embeddings into a fresh PLAID index."""
    print("[INIT] [1] Loading the model...")
    model = models.ColBERT(model_name_or_path=MODEL_PATH)
    # Some checkpoints ship without a pad token; fall back to the EOS token so batching works.
    if hasattr(model, "tokenizer") and getattr(model.tokenizer, "pad_token", None) is None:
        model.tokenizer.pad_token = model.tokenizer.eos_token

    print("[INIT] [2] Creating the PLAID index...")
    index = indexes.PLAID(
        index_folder=INDEX_FOLDER,
        index_name=INDEX_NAME,
        override=True,
    )

    print("[INIT] [3] Encoding documents into embeddings...")
    doc_embeddings = model.encode(
        doc_texts,
        batch_size=16,
        is_query=False,
        show_progress_bar=True,
    )

    # Log the shape of whatever the encoder returned (a tensor or a list of per-document tensors).
    if isinstance(doc_embeddings, torch.Tensor):
        print(f"[INIT] Document embeddings: torch.Tensor, shape={doc_embeddings.shape}")
        if doc_embeddings.ndim == 3:
            _, num_tokens, dim = doc_embeddings.shape
            print(f"[INIT] -> num_tokens={num_tokens}, dim={dim}")
        elif doc_embeddings.ndim == 2:
            _, dim = doc_embeddings.shape
            print(f"[INIT] -> dim={dim}")
    elif isinstance(doc_embeddings, list):
        print(f"[INIT] Document embeddings: list, len={len(doc_embeddings)}")
        if len(doc_embeddings) > 0 and isinstance(doc_embeddings[0], torch.Tensor):
            print(f"[INIT] -> First embedding shape={doc_embeddings[0].shape}")
    else:
        print(f"[INIT] Document embedding type={type(doc_embeddings)}")

    print("[INIT] [4] Writing to the index...")
    index.add_documents(
        documents_ids=doc_ids,
        documents_embeddings=doc_embeddings,
    )

    print("[INIT] Index build finished.")
    return model, index
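

# build_index() above always rebuilds the index (override=True), re-encoding every chunk on
# each start-up. The helper below is a hypothetical sketch, not used anywhere in this file:
# it assumes pylate's PLAID index can be reopened from the same folder/name with
# override=False, which would avoid re-encoding when the corpus has not changed.
def load_existing_index():
    model = models.ColBERT(model_name_or_path=MODEL_PATH)
    index = indexes.PLAID(
        index_folder=INDEX_FOLDER,
        index_name=INDEX_NAME,
        override=False,  # assumption: reuse the index already on disk instead of rebuilding it
    )
    return model, index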


def query_index(model, index, q_text, topk=5):
    """Encode a single query and retrieve the top-k chunks from the PLAID index."""
    print("\n[QUERY] ===============================")
    print(f"[QUERY] New query received: {q_text!r}")
    print(f"[QUERY] Character length: {len(q_text)}")
    print("[QUERY] The query is not chunked; it is treated as one whole text.")

    retriever = retrieve.ColBERT(index=index)

    print("[QUERY] Encoding with ColBERT (is_query=True)...")
    q_emb = model.encode(
        [q_text],
        batch_size=1,
        is_query=True,
        show_progress_bar=False,
    )

    # Log the shape of the query embedding (a tensor or a list of tensors).
    if isinstance(q_emb, torch.Tensor):
        print(f"[QUERY] Query embedding type: torch.Tensor, shape={q_emb.shape}")
        if q_emb.ndim == 3:
            batch, num_tokens, dim = q_emb.shape
            print(f"[QUERY] -> batch={batch}, num_tokens={num_tokens}, dim={dim}")
        elif q_emb.ndim == 2:
            batch, dim = q_emb.shape
            print(f"[QUERY] -> batch={batch}, dim={dim}")
        elif q_emb.ndim == 1:
            dim = q_emb.shape[0]
            print(f"[QUERY] -> dim={dim}")
    elif isinstance(q_emb, list):
        print(f"[QUERY] Query embedding type: list, len={len(q_emb)}")
        if len(q_emb) > 0 and isinstance(q_emb[0], torch.Tensor):
            print(f"[QUERY] -> First element shape={q_emb[0].shape}")
            shape = q_emb[0].shape
            if len(shape) == 2:
                num_tokens, dim = shape
                print(f"[QUERY] num_tokens={num_tokens}, dim={dim}")
    else:
        print(f"[QUERY] Query embedding type: {type(q_emb)}")

    print(f"[QUERY] Retrieving topk={topk} chunks from the PLAID index (index='{INDEX_NAME}') ...")
    results = retriever.retrieve(
        queries_embeddings=q_emb,
        k=topk,
    )

    hits = results[0]
    print(f"[QUERY] Total hits found={len(hits)}")
    for rank, hit in enumerate(hits, start=1):
        doc_id = hit.get("id")
        score = hit.get("score")
        print(f"[QUERY] -> Rank {rank}: id={doc_id}, score={score}")

    print("[QUERY] ===============================\n")
    return results


app = FastAPI()

# Load the corpus and build the index once at import time, so the first request
# does not pay the start-up cost.
print("[INIT] Reading documents...")
DOC_IDS, DOC_TEXTS, DOC_MAP = load_documents(DOCS_DIR, CHUNK_CHARS)

print("[INIT] Building the index (or recreating it)...")
MODEL, INDEX = build_index(DOC_IDS, DOC_TEXTS)
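

# Design note (sketch only, not part of the running service): building the index at import
# time blocks every worker start-up and runs before the app object is fully configured.
# An alternative, assuming FastAPI >= 0.93, is the lifespan hook:
#
#     from contextlib import asynccontextmanager
#
#     @asynccontextmanager
#     async def lifespan(app: FastAPI):
#         global MODEL, INDEX, DOC_IDS, DOC_TEXTS, DOC_MAP
#         DOC_IDS, DOC_TEXTS, DOC_MAP = load_documents(DOCS_DIR, CHUNK_CHARS)
#         MODEL, INDEX = build_index(DOC_IDS, DOC_TEXTS)
#         yield
#
#     app = FastAPI(lifespan=lifespan)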


class SearchRequest(BaseModel):
    query: str
    topk: int = 5


@app.post("/search")
def search(req: SearchRequest):
    """
    POST /search
    {
        "query": "your query here",
        "topk": 5
    }
    """
    print("[API] /search called")
    print(f"[API] -> query={req.query!r}, topk={req.topk}")

    results = query_index(MODEL, INDEX, req.query, topk=req.topk)

    # Attach a short text preview from the in-memory chunk map to each hit.
    hits = []
    for hit in results[0]:
        doc_id = hit.get("id")
        score = hit.get("score")
        text = DOC_MAP.get(doc_id, "")
        hits.append({
            "id": doc_id,
            "score": score,
            "text_preview": text[:500],
        })
    print("[API] /search finished, returning JSON to the client.\n")

    return {"results": hits}
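

# Usage sketch (assumptions: this file is saved as app.py, uvicorn is installed, and
# port 8000 is free; adjust the module name, host and port to your setup):
#
#   uvicorn app:app --host 0.0.0.0 --port 8000
#
#   curl -X POST http://localhost:8000/search \
#        -H "Content-Type: application/json" \
#        -d '{"query": "your query here", "topk": 3}'
#
# Running the module directly also works when uvicorn is available:
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)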