#!/usr/bin/env python3
from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any, Dict, Iterable

REQUIRED_FIELDS = {
    "id", "created", "topic", "task_type", "difficulty",
    "instruction", "input", "output", "metadata", "hash",
}


def iter_jsonl(path: Path, max_rows: int | None) -> Iterable[Dict[str, Any]]:
    n = 0
    with path.open("r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
            except Exception:
                yield {"__parse_error__": True}
                n += 1
                if max_rows and n >= max_rows:
                    return
                continue
            yield obj
            n += 1
            if max_rows and n >= max_rows:
                return


def is_tool_trace_valid(out: Any) -> bool:
    # Accept either a parsed list OR a JSON string representing a list
    if isinstance(out, str):
        s = out.strip()
        if not s:
            return False
        try:
            out = json.loads(s)
        except Exception:
            return False
    if not isinstance(out, list) or len(out) == 0:
        return False
    for step in out:
        if not isinstance(step, dict):
            return False
        if "tool" not in step or "args" not in step:
            return False
        if not isinstance(step["tool"], str) or not isinstance(step["args"], dict):
            return False
    return True


def is_patch_diff_valid(out: Any) -> bool:
    if not isinstance(out, str):
        return False
    s = out.strip()
    if "```diff" in s:  # fenced diff
        return "-" in s and "+" in s
    # also allow plain diff-like strings
    return s.startswith("-") or s.startswith("diff") or ("- " in s and "+ " in s)


def is_self_grade_valid(out: Any) -> bool:
    if isinstance(out, str):
        try:
            out = json.loads(out)
        except Exception:
            return False
    if not isinstance(out, dict):
        return False
    score = out.get("score")
    conf = out.get("confidence")
    notes = out.get("notes")
    if not (isinstance(score, int) and 0 <= score <= 10):
        return False
    if not (isinstance(conf, (int, float)) and 0.0 <= float(conf) <= 1.0):
        return False
    if not (isinstance(notes, str) and len(notes) >= 3):
        return False
    return True


def main() -> int:
    ap = argparse.ArgumentParser(description="Genesis AI Code Bench (Within Us AI)")
    ap.add_argument("--jsonl", required=True, help="Path to JSONL file")
    ap.add_argument("--max_rows", type=int, default=5000)
    args = ap.parse_args()

    path = Path(args.jsonl)

    total = 0
    format_valid = 0
    required_ok = 0
    tool_ok = tool_total = 0
    diff_ok = diff_total = 0
    grade_ok = grade_total = 0
    gov_ok = 0
    econ_ok = 0
    hashes = set()
    hash_total = 0

    for obj in iter_jsonl(path, args.max_rows):
        total += 1
        # skip unparsable rows and rows whose top-level value is not a JSON object
        if not isinstance(obj, dict) or obj.get("__parse_error__"):
            continue
        format_valid += 1

        keys = set(obj.keys())
        if REQUIRED_FIELDS.issubset(keys):
            required_ok += 1

        # hash / uniqueness
        h = obj.get("hash")
        if isinstance(h, str) and h:
            hash_total += 1
            hashes.add(h)

        # governance + economics (metadata)
        md = obj.get("metadata", {})
        if isinstance(md, dict):
            if md.get("audit_required") is True:
                gov_ok += 1
            if isinstance(md.get("cost_budget"), int) and isinstance(md.get("latency_ms_target"), int):
                econ_ok += 1

        # task-specific validity
        task = obj.get("task_type")
        out = obj.get("output")
        if task == "tool_trace":
            tool_total += 1
            if is_tool_trace_valid(out):
                tool_ok += 1
        elif task == "patch_diff":
            diff_total += 1
            if is_patch_diff_valid(out):
                diff_ok += 1
        elif task == "self_grade":
            grade_total += 1
            if is_self_grade_valid(out):
                grade_ok += 1

    def rate(num: int, den: int) -> float:
        return 0.0 if den == 0 else round(num / den, 4)

    report = {
        "rows_scanned": total,
        "format_valid_rate": rate(format_valid, total),
        "required_fields_rate": rate(required_ok, total),
        "tool_trace_valid_rate": rate(tool_ok, tool_total),
        "patch_diff_valid_rate": rate(diff_ok, diff_total),
        "self_grade_valid_rate": rate(grade_ok, grade_total),
        "governance_present_rate": rate(gov_ok, total),
        "economics_present_rate": rate(econ_ok, total),
        "uniqueness_rate": rate(len(hashes), hash_total),
    }

    print(json.dumps(report, indent=2))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())