# # app.py
# import os
# import re
# import textwrap
# from typing import List, Dict, Any, Tuple
# from datetime import datetime
# from fastapi import FastAPI, Request
# from fastapi.responses import HTMLResponse
# # --- ColBERT / PLAID ---
# from pylate import models, indexes, retrieve
# # ================== Config ==================
# MODEL_PATH = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M"
# DOCS_FOLDER = "huuli_docs"               # .txt files used only for showing text
# INDEX_FOLDER = "huuli_index_storage_v2"  # existing index folder
# INDEX_NAME = "huuli_index_v2"            # existing index name
# PREVIEW_LEN = 300
# TOPK = 5
# CHUNK_CHARS = 800  # fallback for non-structured files
# # Where to log all questions + answers
# ANSWERS_DIR = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M/answers"
# ANSWERS_FILE = os.path.join(ANSWERS_DIR, "huuli_test_data.txt")
# # ============================================
# # --------- Globals loaded at startup ---------
# app = FastAPI(title="Huuli Search (FastAPI)")
# MODEL = None
# INDEX = None
# RETRIEVER = None
# DOC_MAP: Dict[str, str] = {}  # doc_id -> full text to display
# RE_BLOCK = re.compile(
#     r"ID:\s*(?P<id>.+?)\s*"
#     r"Number:\s*(?P<number>.+?)\s*"
#     r"Header:\s*(?P<header>.+?)\s*"
#     r"Content:\s*(?P<content>.+?)(?=\nID:|\Z)",
#     flags=re.DOTALL,
# )
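# # Illustrative record block that RE_BLOCK is written to match (the field
# # values here are made up; the real data lives in DOCS_FOLDER):
# #
# #   ID: huuli_zui_art_1
# #   Number: 1.5
# #   Header: Example header
# #   Content: Body text that runs until the next "ID:" line or end of file.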
# DIVIDER_RE = re.compile(r"^\s*-{3,}\s*$", flags=re.MULTILINE)
# def _normalize_spaces(s: str) -> str:
#     return " ".join(s.split())
# def _read_file(fname: str) -> str:
#     p = os.path.join(DOCS_FOLDER, fname)
#     if not os.path.isfile(p):
#         return ""
#     try:
#         return open(p, "r", encoding="utf-8").read().strip()
#     except Exception:
#         return ""
# def _shorten_filename_for_index(fname: str) -> str:
#     """
#     'gpt-data-collector.huuli_zui.txt' -> 'huuli_zui.txt'
#     (keeps compatibility with index ids)
#     """
#     if not fname.endswith(".txt"):
#         return fname
#     stem = fname[:-4]
#     short_stem = stem.split(".")[-1] if "." in stem else stem
#     return short_stem + ".txt"
# def _slug(s: str) -> str:
#     # safe slug for ids (avoid spaces and slashes)
#     s = re.sub(r"\s+", "_", s.strip())
#     s = s.replace("/", "_")
#     return s
# def _parse_records(raw: str) -> List[Dict[str, str]]:
#     """
#     Parse structured records and drop '-----' divider lines.
#     Returns a list of dicts with id, number, header, content.
#     """
#     if not raw:
#         return []
#     # remove divider lines to avoid garbage inside content
#     cleaned = DIVIDER_RE.sub("\n", raw)
#     recs = []
#     for m in RE_BLOCK.finditer(cleaned):
#         recs.append({
#             "id": m.group("id").strip(),
#             "number": m.group("number").strip(),
#             "header": m.group("header").strip(),
#             "content": m.group("content").strip(),
#         })
#     return recs
# def _group_by_number_prefix(records: List[Dict[str, str]],
#                             fname: str) -> List[Tuple[str, str]]:
#     """
#     Merge CONTIGUOUS records that share the same Number (prefix).
#     Example: a Number of '1.5' appearing across several records (points 7,
#     8, 9) is combined into a single display group; the next different
#     Number then starts a new group.
#     Returns a list of (group_id, group_text).
#     """
#     groups: List[Tuple[str, str]] = []
#     if not records:
#         return groups
#     def group_id(number: str, idx: int) -> str:
#         # ID looks like: <fname>__numgrp_<number_slug>__group_<idx>
#         return f"{fname}__numgrp_{_slug(number)}__group_{idx}"
#     current_number = None
#     current_parts: List[str] = []
#     gi = 0
#     def flush():
#         nonlocal gi, current_parts, current_number
#         if current_parts:
#             gid = group_id(current_number or "UNKNOWN", gi)
#             groups.append((gid, "\n\n".join(current_parts).strip()))
#             gi += 1
#             current_parts = []
#     for rec in records:
#         num = rec.get("number", "").strip()
#         # when Number changes, close the previous group
#         if current_number is None or num != current_number:
#             flush()
#             current_number = num
#         block_lines = []
#         if rec.get("id"): block_lines.append(f"ID: {rec['id']}")
#         if num: block_lines.append(f"Number: {num}")
#         if rec.get("header"): block_lines.append(f"Header: {rec['header']}")
#         if rec.get("content"):
#             block_lines.append(f"Content: {rec['content']}")
#         current_parts.append("\n".join(block_lines))
#     flush()
#     return groups
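# # Hedged walk-through with made-up Numbers: records whose Number fields are
# # ['1.5', '1.5', '1.6'] yield two groups,
# #   '<fname>__numgrp_1.5__group_0'  (the first two records merged)
# #   '<fname>__numgrp_1.6__group_1'  (the third record alone)
# # Note that _slug() only rewrites whitespace and '/', so dots survive in ids.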
# def load_doc_map(folder_path: str = DOCS_FOLDER,
#                  chunk_chars: int = CHUNK_CHARS) -> Dict[str, str]:
#     """
#     Build a map {doc_id: full_text_for_display}.
#     Priority:
#       1) If the file is structured, create NUMBER-GROUPED doc ids:
#          '<fname>__numgrp_<number_slug>__group_<i>'.
#          Also add a second key using the shortened file name for index
#          compatibility.
#       2) If not structured, fall back to naive chunks ('__chunk_i').
#     We ALSO keep per-record ids ('__rec_i') for backward compatibility.
#     """
#     doc_map: Dict[str, str] = {}
#     if not os.path.isdir(folder_path):
#         print(f"⚠️ DOCS folder not found: {os.path.abspath(folder_path)}")
#         return doc_map
#     for fname in sorted(os.listdir(folder_path)):
#         if not fname.endswith(".txt"):
#             continue
#         base = fname
#         base_short = _shorten_filename_for_index(base)
#         try:
#             raw = open(os.path.join(folder_path, fname), "r", encoding="utf-8").read().strip()
#         except Exception as e:
#             print(f"⚠️ Read error {fname}: {e}")
#             continue
#         records = _parse_records(raw)
#         if records:
#             # ---- 1) store per-record (compat) ----
#             for i, rec in enumerate(records):
#                 content_clean = _normalize_spaces(rec.get("content", ""))
#                 if not content_clean:
#                     continue
#                 doc_map[f"{base}__rec_{i}"] = content_clean
#                 doc_map[f"{base_short}__rec_{i}"] = content_clean
#             # ---- 2) store grouped-by-number for display/index ----
#             groups = _group_by_number_prefix(records, base)
#             for gid, gtxt in groups:
#                 gtxt_clean = _normalize_spaces(gtxt)
#                 if not gtxt_clean:
#                     continue
#                 # with long name
#                 doc_map[gid] = gtxt_clean
#                 # with short name, to match index doc ids that might use the shortened base
#                 gid_short = gid.replace(base, base_short, 1)
#                 doc_map[gid_short] = gtxt_clean
#             continue
#         # Not structured -> fall back to a naive wrap
#         chunks = textwrap.wrap(raw, width=chunk_chars, break_long_words=False) or [raw]
#         for i, c in enumerate(chunks):
#             text_clean = _normalize_spaces(c.strip())
#             if not text_clean:
#                 continue
#             doc_map[f"{base}__chunk_{i}"] = text_clean
#             doc_map[f"{base_short}__chunk_{i}"] = text_clean
#     print(f"[INFO] DOC_MAP ready with {len(doc_map)} entries")
#     return doc_map
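# # Hedged example: for a hypothetical structured file
# # 'gpt-data-collector.huuli_zui.txt' holding one record with Number '1.5',
# # DOC_MAP would contain (at least) these keys, all mapping to the same text:
# #   'gpt-data-collector.huuli_zui.txt__rec_0'
# #   'huuli_zui.txt__rec_0'
# #   'gpt-data-collector.huuli_zui.txt__numgrp_1.5__group_0'
# #   'huuli_zui.txt__numgrp_1.5__group_0'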
# # ------------- Model + Index ----------------
# def load_model_and_index():
#     print("[1] Loading model…")
#     model = models.ColBERT(model_name_or_path=MODEL_PATH)
#     if getattr(model, "tokenizer", None) and model.tokenizer.pad_token is None:
#         model.tokenizer.pad_token = model.tokenizer.eos_token
#     index_root = os.path.abspath(INDEX_FOLDER)
#     if not os.path.isdir(index_root):
#         raise FileNotFoundError(
#             f"Index folder not found: {index_root}\nCheck INDEX_FOLDER/INDEX_NAME."
#         )
#     print("[2] Opening existing PLAID index…")
#     idx = indexes.PLAID(index_folder=INDEX_FOLDER, index_name=INDEX_NAME, override=False)
#     if hasattr(idx, "load"):
#         idx.load()
#     ret = retrieve.ColBERT(index=idx)
#     return model, idx, ret
# # ---------- Fallback extractors (compat) ----------
# def _extract_text_by_rec(fname: str, rec_idx: int) -> str:
#     full = _read_file(fname) or _read_file(_shorten_filename_for_index(fname))
#     records = _parse_records(full)
#     if 0 <= rec_idx < len(records):
#         return _normalize_spaces(records[rec_idx].get("content", ""))
#     return ""
# def _extract_text_by_chunk(fname: str, chunk_idx: int) -> str:
#     full = _read_file(fname) or _read_file(_shorten_filename_for_index(fname))
#     chunks = textwrap.wrap(full, width=CHUNK_CHARS, break_long_words=False) or [full]
#     if 0 <= chunk_idx < len(chunks):
#         return _normalize_spaces(chunks[chunk_idx].strip())
#     return ""
# def _text_for_docid(doc_id: str) -> str:
#     """
#     Lookup order:
#       1) DOC_MAP (handles __numgrp_, __rec_, __chunk_ with short/long names)
#       2) Reconstruct (best-effort) for __rec_ and __chunk_
#          (reconstruction for __numgrp_ is not needed if DOC_MAP is loaded,
#          because we generate those ids deterministically at startup).
#     """
#     if not doc_id:
#         return ""
#     t = DOC_MAP.get(doc_id)
#     if t:
#         return t
#     # Fallback reconstruction for legacy ids:
#     if "__rec_" in doc_id:
#         fname, idx = doc_id.split("__rec_")
#         try:
#             return _extract_text_by_rec(fname, int(idx))
#         except Exception:
#             return ""
#     if "__chunk_" in doc_id:
#         fname, idx = doc_id.split("__chunk_")
#         try:
#             return _extract_text_by_chunk(fname, int(idx))
#         except Exception:
#             return ""
#     return ""
# # ---------------- Search -------------------
# def search(q: str, topk: int = TOPK) -> List[Dict[str, Any]]:
#     q_emb = MODEL.encode([q], batch_size=1, is_query=True, show_progress_bar=False)
#     hits = RETRIEVER.retrieve(queries_embeddings=q_emb, k=topk)[0]
#     results = []
#     for rank, h in enumerate(hits, start=1):
#         doc_id = h.get("id")
#         score = h.get("score")
#         full_text = _text_for_docid(doc_id)
#         preview = full_text[:PREVIEW_LEN]
#         results.append({
#             "rank": rank,
#             "score": float(score) if isinstance(score, (int, float)) else score,
#             "doc_id": doc_id,
#             "preview": preview,
#             "full": full_text,
#         })
#     return results
# # ---------- Persist answers ----------
# def save_answers(question: str, results: List[Dict[str, Any]]):
#     os.makedirs(ANSWERS_DIR, exist_ok=True)
#     ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#     lines = []
#     lines.append("=".ljust(80, "="))
#     lines.append(f"TIMESTAMP: {ts}")
#     lines.append(f"QUESTION : {question}")
#     lines.append(f"TOPK     : {len(results)}")
#     for r in results:
#         lines.append("-".ljust(80, "-"))
#         lines.append(f"Rank : {r['rank']}")
#         lines.append(f"Score: {r['score']}")
#         lines.append(f"DocID: {r['doc_id']}")
#         lines.append("Preview:")
#         lines.append(r["preview"])
#         lines.append("")
#         lines.append("Full:")
#         lines.append(r["full"])
#         lines.append("\n")
#     with open(ANSWERS_FILE, "a", encoding="utf-8") as f:
#         f.write("\n".join(lines))
# # ------------------- UI -------------------
# HTML_PAGE = """
# <!doctype html>
# <html lang="en">
# <head>
# <meta charset="utf-8" />
# <title>Huuli Search</title>
# <meta name="viewport" content="width=device-width, initial-scale=1" />
# <style>
# body{font-family:system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,"Helvetica Neue",Arial,sans-serif;margin:0;background:#0b1020;color:#f3f6ff}
# header{padding:20px;border-bottom:1px solid #1b2140;background:#0f1530;position:sticky;top:0}
# .wrap{max-width:980px;margin:0 auto;padding:16px}
# form{display:flex;gap:8px}
# input[type=text]{flex:1;padding:12px 14px;border-radius:10px;border:1px solid #283056;background:#111737;color:#e9edff}
# button{padding:12px 16px;border-radius:10px;border:1px solid #2b6df6;background:#2b6df6;color:white;cursor:pointer}
# button.secondary{background:transparent;border-color:#3a4a86;color:#cdd5ff}
# .hint{opacity:.8;font-size:14px;margin-top:6px}
# .card{background:#0f1530;border:1px solid #1d2550;border-radius:14px;padding:16px;margin:14px 0}
# .meta{display:flex;gap:12px;font-size:12px;opacity:.8;margin-bottom:6px}
# .docid{overflow-wrap:anywhere;color:#9fb0ff}
# .preview{white-space:pre-wrap}
# .full{display:none;white-space:pre-wrap;margin-top:8px;border-top:1px dashed #2a356f;padding-top:8px}
# </style>
# <script>
# function toggle(id){
#   const full = document.getElementById('full_'+id);
#   const btn = document.getElementById('btn_'+id);
#   if(!full) return;
#   const vis = full.style.display === 'block';
#   full.style.display = vis ? 'none' : 'block';
#   btn.textContent = vis ? 'Continue' : 'Collapse';
# }
# </script>
# </head>
# <body>
# <header>
#   <div class="wrap">
#     <form method="get" action="/">
#       <input type="text" name="q" value="{{ q | e }}" placeholder="Асуултаа бичнэ үү…" autofocus />
#       <button type="submit">Search</button>
#       <a href="/" style="text-decoration:none"><button type="button" class="secondary">Clear</button></a>
#     </form>
#     <div class="hint">TOPK={{ topk }} · Preview first; click <b>Continue</b> to show the whole text.</div>
#   </div>
# </header>
# <main>
#   <div class="wrap">
#     {% if q and not results %}
#     <div class="card">No results.</div>
#     {% endif %}
#     {% for r in results %}
#     <div class="card">
#       <div class="meta">
#         <div>Rank: <b>{{ r.rank }}</b></div>
#         <div>Score: <b>{{ '%.6f' % r.score if r.score is number else r.score }}</b></div>
#       </div>
#       <div class="docid">DocID: {{ r.doc_id }}</div>
#       <p class="preview">
#         {{ r.preview }}{% if r.full|length > r.preview|length %}…{% endif %}
#       </p>
#       <button id="btn_{{ loop.index0 }}" onclick="toggle('{{ loop.index0 }}')">Continue</button>
#       <div class="full" id="full_{{ loop.index0 }}">{{ r.full }}</div>
#     </div>
#     {% endfor %}
#   </div>
# </main>
# </body>
# </html>
# """
# from jinja2 import Environment, BaseLoader, select_autoescape
# JINJA_ENV = Environment(
#     loader=BaseLoader(),
#     autoescape=select_autoescape(['html', 'xml'])
# )
# @app.get("/", response_class=HTMLResponse)
# async def home(request: Request, q: str = "", topk: int = TOPK):
#     results = search(q, topk) if q.strip() else []
#     if q.strip():
#         save_answers(q, results)
#     template = JINJA_ENV.from_string(HTML_PAGE)
#     return template.render(q=q, topk=topk, results=results)
# # -------------- Startup --------------
# @app.on_event("startup")
# def _startup():
#     global MODEL, INDEX, RETRIEVER, DOC_MAP
#     os.makedirs(ANSWERS_DIR, exist_ok=True)
#     DOC_MAP = load_doc_map(DOCS_FOLDER, CHUNK_CHARS)
#     MODEL, INDEX, RETRIEVER = load_model_and_index()
#     print("✅ Ready. Open http://localhost:8010")
#     print(f"📝 Logging to: {ANSWERS_FILE}")
# # -------------- Entrypoint --------------
# if __name__ == "__main__":
#     import uvicorn
#     uvicorn.run(app, host="0.0.0.0", port=8010)
# app.py
import os
from typing import List

import torch
from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel
from pylate import models

# ====== CONFIG ======
MODEL_PATH = "/home/astgpu3/workspace/bolorjinbat/LFM2-ColBERT-350M"
# ====================

app = FastAPI(title="LFM2-ColBERT Embedding API")
MODEL = None  # will be loaded on startup

# ---------- Pydantic models ----------
class EmbedRequest(BaseModel):
    text: str

class EmbedResponse(BaseModel):
    text: str
    dimension: int
    embedding: List[float]

# ---------- Model loading ----------
def load_model():
    print(f"[1] Loading ColBERT model from: {MODEL_PATH}")
    if not os.path.isdir(MODEL_PATH):
        raise RuntimeError(f"MODEL_PATH does not exist: {MODEL_PATH}")
    model = models.ColBERT(model_name_or_path=MODEL_PATH)
    # Fix pad_token if missing
    if getattr(model, "tokenizer", None) and model.tokenizer.pad_token is None:
        model.tokenizer.pad_token = model.tokenizer.eos_token
    print("[2] Model ready.")
    return model

# ---------- Embedding helper (with pooling) ----------
def embed_text(text: str) -> List[float]:
    """
    Encode the text into a single embedding vector (a 1D list of floats).
    Handles the different shapes MODEL.encode can return by mean-pooling
    over tokens where needed.
    """
    text = text.strip()
    if not text:
        return []
    emb = MODEL.encode(
        [text],
        batch_size=1,
        is_query=True,
        show_progress_bar=False,
    )
    # Ensure we have a torch tensor
    if isinstance(emb, torch.Tensor):
        t = emb
    else:
        t = torch.tensor(emb)
    # Possible shapes:
    #   - [batch, tokens, dim]
    #   - [batch, dim]
    #   - [tokens, dim]
    #   - [dim]
    if t.ndim == 3:
        # [1, tokens, dim] -> mean over tokens -> [1, dim] -> [dim]
        t = t.mean(dim=1)[0]
    elif t.ndim == 2:
        # Either [1, dim] or [tokens, dim]
        if t.shape[0] == 1:
            # [1, dim] -> [dim]
            t = t[0]
        else:
            # [tokens, dim] -> mean over tokens -> [dim]
            t = t.mean(dim=0)
    elif t.ndim == 1:
        # already [dim]
        pass
    else:
        raise ValueError(f"Unexpected embedding shape from model.encode: {t.shape}")
    return t.tolist()
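# Hedged shape walk-through (dimensions are illustrative, not the model's
# actual config): a multi-vector output of shape [1, 32, 128] (one text,
# 32 tokens, 128 dims per token) is mean-pooled over the token axis down
# to a single 128-dim vector, so len(embed_text("...")) == 128 in that case.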
# ---------- API endpoints ----------
@app.post("/embed", response_model=EmbedResponse)
async def embed_post(req: EmbedRequest):
    """
    POST /embed
    Body: {"text": "your text here"}
    Returns: {"text": "...", "dimension": ..., "embedding": [...]}
    """
    if not req.text.strip():
        raise HTTPException(status_code=400, detail="text must not be empty")
    vec = embed_text(req.text)
    return EmbedResponse(
        text=req.text,
        dimension=len(vec),
        embedding=vec,
    )
@app.get("/embed", response_model=EmbedResponse)
async def embed_get(q: str = Query("", description="Text to embed")):
    """
    GET /embed?q=your+text+here
    Returns the same structure as POST /embed.
    """
    if not q.strip():
        raise HTTPException(status_code=400, detail="q must not be empty")
    vec = embed_text(q)
    return EmbedResponse(
        text=q,
        dimension=len(vec),
        embedding=vec,
    )
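# Example calls (assuming the server is running on localhost:8092, the port
# configured in the entrypoint below):
#   curl -X POST http://localhost:8092/embed \
#        -H "Content-Type: application/json" \
#        -d '{"text": "hello world"}'
#   curl "http://localhost:8092/embed?q=hello+world"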
# ---------- Startup hook ----------
@app.on_event("startup")
def _startup():
    global MODEL
    MODEL = load_model()
    print("✅ Embedding API ready.")

# ---------- Entrypoint ----------
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8092)