| | from __future__ import annotations |
| |
|
| | |
| |
|
| | import argparse |
| | import csv |
| | import re |
| | import time |
| | from pathlib import Path |
| | from typing import Dict, Tuple |
| |
|
| | try: |
| | from scripts.defextra_markers import normalize_paper_id |
| | except ModuleNotFoundError as exc: |
| | if exc.name != "scripts": |
| | raise |
| | import sys |
| |
|
| | PROJECT_ROOT = Path(__file__).resolve().parent.parent |
| | if str(PROJECT_ROOT) not in sys.path: |
| | sys.path.insert(0, str(PROJECT_ROOT)) |
| | from scripts.defextra_markers import normalize_paper_id |
| |
|
| |
|
def _normalize_title(title: str) -> str:
    """Lowercase *title* and collapse every whitespace run to one space."""
    tokens = (title or "").lower().split()
    return " ".join(tokens)
| |
|
| |
|
def _load_csv(path: Path) -> list[dict]:
    """Read *path* as a UTF-8 CSV and return every row as a dict."""
    with path.open(encoding="utf-8", newline="") as fh:
        reader = csv.DictReader(fh)
        return [row for row in reader]
| |
|
| |
|
def _parse_missing_report(path: Path) -> Tuple[list[str], list[str]]:
    """Parse the legal-prep report into (missing definitions, missing contexts).

    The report lists items as "- <paper_id> | <concept>" bullets beneath the
    "Missing definitions" / "Missing contexts" headings. Bullets seen before
    any heading are ignored, and a missing file yields two empty lists.
    """
    defs: list[str] = []
    ctxs: list[str] = []
    if not path.exists():
        return defs, ctxs
    buckets = {"def": defs, "ctx": ctxs}
    current = None
    for raw in path.read_text(encoding="utf-8").splitlines():
        text = raw.strip()
        if text.startswith("Missing definitions"):
            current = "def"
        elif text.startswith("Missing contexts"):
            current = "ctx"
        elif text.startswith("-") and current is not None:
            buckets[current].append(text[1:].strip())
    return defs, ctxs
| |
|
| |
|
def _index_recent_pdfs(
    pdf_dir: Path,
    cutoff_ts: float,
) -> Dict[str, Path]:
    """Map lookup keys derived from recent PDF filenames to their paths.

    Only PDFs whose mtime is at least *cutoff_ts* are indexed. For every
    stem we register the raw, lowercased, and normalized forms, plus the
    same three forms of any recognizable base: "paper_" prefix stripped,
    "_fixed"/"-fixed" suffix stripped, arXiv/version suffix removed, and an
    embedded Elsevier PII. The first PDF seen for a key wins.
    """
    index: Dict[str, Path] = {}
    if not pdf_dir.exists():
        return index
    versioned = re.compile(r"^(?P<base>.+?)(v\d+)$", re.IGNORECASE)
    arxiv = re.compile(r"^(?P<base>\d{4}\.\d{4,5})v\d+$", re.IGNORECASE)
    pii = re.compile(r"(S\d{8,})", re.IGNORECASE)

    def variants(value: str) -> set:
        # Every key is indexed in raw, lowercase, and normalized form.
        return {value, value.lower(), normalize_paper_id(value)}

    for pattern in ("*.pdf", "*.PDF"):
        for pdf_path in pdf_dir.rglob(pattern):
            try:
                mtime = pdf_path.stat().st_mtime
            except OSError:
                # Unreadable entry (e.g. broken symlink): skip it.
                continue
            if mtime < cutoff_ts:
                continue
            stem = pdf_path.stem
            keys = variants(stem)
            if stem.startswith("paper_"):
                keys |= variants(stem[len("paper_"):])
            for suffix in ("_fixed", "-fixed"):
                if stem.endswith(suffix):
                    base = stem[: -len(suffix)]
                    if base:
                        keys |= variants(base)
                    break
            for compiled in (arxiv, versioned):
                match = compiled.match(stem)
                if match:
                    keys |= variants(match.group("base"))
            pii_match = pii.search(stem)
            if pii_match:
                keys |= variants(pii_match.group(1))
            for key in keys:
                if key:
                    index.setdefault(key, pdf_path)
    return index
| |
|
| |
|
def _split_item(item: str) -> Tuple[str, str] | None:
    """Split a report bullet "<paper_id> | <concept>" into its two parts.

    Returns None when the item has no "|" separator (malformed line).
    """
    try:
        pid, concept = (part.strip() for part in item.split("|", 1))
    except ValueError:
        return None
    return pid, concept


def _filter_implicit(
    items: list[str],
    idx: Dict[Tuple[str, str], dict],
) -> list[str]:
    """Keep items whose legal row has definition_type == "implicit"."""
    kept: list[str] = []
    for item in items:
        key = _split_item(item)
        if key is None:
            continue
        row = idx.get(key)
        if (
            row
            and (row.get("definition_type") or "").strip().lower()
            == "implicit"
        ):
            kept.append(item)
    return kept


def _is_recent(pid: str, recent_index: Dict[str, Path]) -> bool:
    """True when *pid* (raw or normalized) maps to a recently downloaded PDF."""
    return pid in recent_index or normalize_paper_id(pid) in recent_index


def _filter_recent(
    items: list[str],
    recent_index: Dict[str, Path],
) -> list[str]:
    """Keep "<paper_id> | <concept>" items whose paper has a recent PDF."""
    kept: list[str] = []
    for item in items:
        key = _split_item(item)
        if key is not None and _is_recent(key[0], recent_index):
            kept.append(item)
    return kept


def _render_report(sections: list[Tuple[str, list[str]]]) -> str:
    """Render (header, items) sections as "<header>: <count>" plus bullets."""
    lines: list[str] = []
    for header, items in sections:
        lines.append(f"{header}: {len(items)}")
        lines.extend(f"- {item}" for item in items)
        lines.append("")
    return "\n".join(lines) + "\n"


def main() -> None:
    """Report DefExtra hydration coverage and highlight actionable gaps.

    Compares the legal CSV against the hydrated CSV, folds in the
    prepare-step's missing-span report, and tags entries whose source PDF
    was downloaded within the recency window (so they may now be fixable).
    Writes the report to --output when given, otherwise prints it.
    """
    parser = argparse.ArgumentParser(
        description="Report DefExtra hydration coverage and missing spans.",
    )
    parser.add_argument(
        "--legal-csv",
        type=Path,
        default=Path("results/paper_results/defextra_legal_tablefix.csv"),
        help="Legal CSV used for hydration.",
    )
    parser.add_argument(
        "--legal-report",
        type=Path,
        default=Path(
            "results/paper_results/defextra_legal_tablefix_report.txt",
        ),
        help="Report generated by prepare_defextra_legal.py.",
    )
    parser.add_argument(
        "--hydrated-csv",
        type=Path,
        default=Path(
            "results/paper_results/defextra_hydrated_tablefix_test.csv",
        ),
        help="Hydrated CSV from hydrate_defextra.py.",
    )
    parser.add_argument(
        "--pdf-dir",
        type=Path,
        default=Path("ManualPDFsGROBID/manual_pdfs/manual_pdfs"),
        help="Directory with user PDFs (used to tag recent downloads).",
    )
    parser.add_argument(
        "--recent-days",
        type=int,
        default=7,
        help="How many days count as 'recent' for PDF downloads.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Optional report output path.",
    )
    args = parser.parse_args()

    legal_rows = _load_csv(args.legal_csv)
    hydrated_rows = (
        _load_csv(args.hydrated_csv) if args.hydrated_csv.exists() else []
    )

    # Papers present in the legal CSV but absent from the hydrated output.
    ref_ids = {
        row.get("paper_id", "") for row in legal_rows if row.get("paper_id")
    }
    hyd_ids = {
        row.get("paper_id", "") for row in hydrated_rows if row.get("paper_id")
    }
    missing_papers = sorted(ref_ids - hyd_ids)

    missing_defs, missing_ctxs = _parse_missing_report(args.legal_report)

    # (paper_id, concept) -> legal row, used to look up definition_type.
    idx = {
        (row.get("paper_id", ""), row.get("concept", "")): row
        for row in legal_rows
    }

    implicit_defs = _filter_implicit(missing_defs, idx)
    implicit_ctxs = _filter_implicit(missing_ctxs, idx)

    # PDFs downloaded inside the recency window may make missing spans fixable.
    cutoff_ts = time.time() - (args.recent_days * 86400)
    recent_index = _index_recent_pdfs(args.pdf_dir, cutoff_ts)

    recent_missing_papers = [
        pid for pid in missing_papers if _is_recent(pid, recent_index)
    ]
    recent_missing_defs = _filter_recent(missing_defs, recent_index)
    recent_missing_ctxs = _filter_recent(missing_ctxs, recent_index)

    recent_note = f"with recent PDFs (<= {args.recent_days} days)"
    output = _render_report(
        [
            ("Missing papers (no hydrated rows)", missing_papers),
            ("Missing definition spans marked implicit", implicit_defs),
            ("Missing context spans marked implicit", implicit_ctxs),
            (f"Missing papers {recent_note}", recent_missing_papers),
            (f"Missing definition spans {recent_note}", recent_missing_defs),
            (f"Missing context spans {recent_note}", recent_missing_ctxs),
        ]
    )
    if args.output is not None:
        args.output.parent.mkdir(parents=True, exist_ok=True)
        args.output.write_text(output, encoding="utf-8")
        print(f"Wrote report to {args.output}")
    else:
        print(output)


if __name__ == "__main__":
    main()
| |
|