nazneen committed on
Commit 699332e · verified · 1 Parent(s): b4fd80f

Upload verify.py

Files changed (1)
  1. data/verifier/verify.py +517 -0
data/verifier/verify.py ADDED
@@ -0,0 +1,517 @@
+ #!/usr/bin/env python3
+ """Verifies the math answers in the dataset against the model outputs.
+
+ The dataset is a single row containing the ground-truth final answer, and the
+ predictions path is a JSONL file with a single row of model output.
+
+ Example:
+
+ ```
+ python data/verifier/verify.py \
+     --dataset-path dataset.jsonl \
+     --predictions-path model_output.json \
+     --prediction-column model_output
+ ```
+
+ Supported input formats: ``.jsonl``/``.ndjson``, ``.json``, ``.csv``,
+ ``.tsv`` and ``.parquet`` (requires pandas).
+ """
+
+ from __future__ import annotations
+
+ import argparse
+ import csv
+ import json
+ import math
+ import numbers
+ import re
+ import sys
+ import unicodedata
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Any, Iterable, Sequence
+
+
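+ # Internal column names used when merging model outputs into dataset rows.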
+ MODEL_OUTPUT_COL = "__model_output"
+ ROW_ID_COL = "__row_id"
+
+ # Regexes that help identify different final-answer formats.
+ BOXED_PATTERN = re.compile(r"\\boxed\s*\{([^}]*)\}")
+ HASH_PATTERN = re.compile(r"####\s*(.+)")
+ FINAL_ANSWER_PATTERNS = [
+     re.compile(r"(?i)final answer(?: is)?\s*[:=\-]?\s*(.+)"),
+     re.compile(r"(?i)final result(?: is)?\s*[:=\-]?\s*(.+)"),
+     re.compile(r"(?i)the answer is\s*(.+)"),
+     re.compile(r"(?i)answer(?: is)?\s*[:=\-]?\s*(.+)"),
+     re.compile(r"(?i)ans(?: is)?\s*[:=\-]?\s*(.+)"),
+     re.compile(r"(?i)result(?: is)?\s*[:=\-]?\s*(.+)"),
+ ]
+
+
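+ # LaTeX commands rewritten to plain-text tokens before comparison. Longer
+ # commands (e.g. \leq) are listed before their prefixes (\le) so that plain
+ # string replacement resolves them first.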
+ LATEX_TEXT_REPLACEMENTS = {
+     "\\leq": "<=",
+     "\\le": "<=",
+     "\\geq": ">=",
+     "\\ge": ">=",
+     "\\neq": "!=",
+     "\\times": "*",
+     "\\cdot": "*",
+     "\\div": "/",
+     "\\pm": "+-",
+     "\\pi": "pi",
+     "\\Pi": "pi",
+     "\\infty": "inf",
+     "\\sqrt": "sqrt",
+     "\\Gamma": "gamma",
+     "\\Omega": "omega",
+     "\\alpha": "alpha",
+     "\\beta": "beta",
+     "\\gamma": "gamma",
+     "\\delta": "delta",
+     "\\int": "integral",
+     "\\log": "log",
+     "\\ln": "ln",
+ }
+
+
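+ # Unicode math symbols mapped to the same ASCII tokens as their LaTeX counterparts.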
+ SYMBOL_REPLACEMENTS = {
+     "−": "-",
+     "–": "-",
+     "—": "-",
+     "·": "*",
+     "×": "*",
+     "÷": "/",
+     "π": "pi",
+     "Π": "pi",
+     "∞": "inf",
+     "√": "sqrt",
+     "≤": "<=",
+     "≥": ">=",
+     "≠": "!=",
+     "∈": "in",
+     "∉": "notin",
+     "∪": "union",
+     "∩": "intersect",
+     "′": "'",
+ }
+
+
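+ # JSON keys likely to hold the final answer when a model returns structured output.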
+ OUTPUT_KEYWORDS = {"final_answer", "answer", "prediction", "output", "result"}
+
+
+ @dataclass
+ class ComparisonResult:
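+     """Outcome of comparing one gold answer against the extracted candidates."""
+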
+     is_match: bool
+     matched_candidate: str | None
+     match_type: str | None
+     normalized_gold: str | None
+     normalized_candidate: str | None
+     candidates: Sequence[str]
+
+
+ def parse_args() -> argparse.Namespace:
+     parser = argparse.ArgumentParser(description="Verify math final answers against model outputs")
+     parser.add_argument("--dataset-path", required=True, help="Path to the JSONL/CSV/TSV/parquet dataset")
+     parser.add_argument("--predictions-path", required=True, help="Path to the predictions file")
+     parser.add_argument(
+         "--final-answer-column",
+         default="final_answer",
+         help="Name of the column that holds the ground-truth final answer",
+     )
+     parser.add_argument(
+         "--prediction-column",
+         default="model_output",
+         help="Name of the column that holds the model response",
+     )
+     parser.add_argument(
+         "--id-column",
+         default=None,
+         help="Optional column used to align dataset rows with predictions (defaults to row order)",
+     )
+     parser.add_argument(
+         "--dump-results",
+         default=None,
+         help="Optional path to write the per-row evaluation as JSONL",
+     )
+     parser.add_argument(
+         "--dump-mismatches",
+         default=None,
+         help="Optional path to write only the mismatched rows as JSONL",
+     )
+     return parser.parse_args()
+
+
+ def load_records(path: Path) -> list[dict[str, Any]]:
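+     """Load rows from a JSONL/JSON/CSV/TSV/parquet file as a list of dicts."""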
+     if not path.exists():
+         raise FileNotFoundError(f"Missing file: {path}")
+     ext = path.suffix.lower()
+     if ext in {".jsonl", ".ndjson"}:
+         rows: list[dict[str, Any]] = []
+         with path.open(encoding="utf-8") as handle:
+             for line in handle:
+                 line = line.strip()
+                 if not line:
+                     continue
+                 obj = json.loads(line)
+                 if isinstance(obj, dict):
+                     rows.append(obj)
+                 else:
+                     rows.append({"value": obj})
+         return rows
+     if ext == ".json":
+         content = json.loads(path.read_text(encoding="utf-8"))
+         if isinstance(content, list):
+             return [dict(row) if isinstance(row, dict) else {"value": row} for row in content]
+         if isinstance(content, dict):
+             return [content]
+         raise ValueError(f"Unsupported JSON structure in {path}")
+     if ext in {".csv", ".tsv"}:
+         delimiter = "\t" if ext == ".tsv" else ","
+         with path.open(encoding="utf-8", newline="") as handle:
+             reader = csv.DictReader(handle, delimiter=delimiter)
+             return [dict(row) for row in reader]
+     if ext == ".parquet":
+         try:
+             import pandas as pd  # type: ignore
+         except ImportError as exc:  # pragma: no cover - optional dependency
+             raise ImportError("Reading parquet files requires pandas and pyarrow.") from exc
+         return pd.read_parquet(path).to_dict(orient="records")
+     raise ValueError(f"Unsupported file format: {path}")
+
+
+ def align_tables(
+     dataset_rows: Sequence[dict[str, Any]],
+     prediction_rows: Sequence[dict[str, Any]],
+     *,
+     id_column: str | None,
+     prediction_column: str,
+ ) -> tuple[list[dict[str, Any]], str]:
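+     """Attach each model output to its dataset row.
+
+     Rows are joined on ``id_column`` when given, else by position. Returns
+     the merged rows plus the column name that identifies each row.
+     """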
+     if not prediction_rows:
+         raise ValueError("Predictions file has no rows")
+
+     if not any(prediction_column in row for row in prediction_rows):
+         raise KeyError(f"Prediction column '{prediction_column}' not found in predictions")
+
+     if id_column:
+         missing_ids = [idx for idx, row in enumerate(dataset_rows) if row.get(id_column) is None]
+         if missing_ids:
+             raise KeyError(
+                 f"Dataset rows missing '{id_column}' (first few indices: {missing_ids[:5]})"
+             )
+         prediction_index: dict[Any, dict[str, Any]] = {}
+         for pred in prediction_rows:
+             key = pred.get(id_column)
+             if key is None:
+                 continue
+             prediction_index[key] = pred
+
+         aligned: list[dict[str, Any]] = []
+         missing = 0
+         for idx, row in enumerate(dataset_rows):
+             merged = dict(row)
+             merged[ROW_ID_COL] = idx
+             pred_row = prediction_index.get(row[id_column])
+             if pred_row is not None and prediction_column in pred_row:
+                 merged[MODEL_OUTPUT_COL] = pred_row.get(prediction_column)
+             else:
+                 merged[MODEL_OUTPUT_COL] = None
+                 missing += 1
+             aligned.append(merged)
+
+         if missing:
+             print(
+                 f"[warn] {missing} dataset rows did not have matching predictions by '{id_column}'",
+                 file=sys.stderr,
+             )
+         return aligned, id_column
+
+     # Fall back to row order when an ID column is not provided.
+     align_len = min(len(dataset_rows), len(prediction_rows))
+     if align_len == 0:
+         raise ValueError("No overlapping rows between dataset and predictions")
+     if len(dataset_rows) != len(prediction_rows):
+         shorter = "predictions" if len(prediction_rows) < len(dataset_rows) else "dataset"
+         print(
+             f"[warn] {shorter} has fewer rows (evaluating on {align_len} aligned samples)",
+             file=sys.stderr,
+         )
+     trimmed: list[dict[str, Any]] = []
+     for idx in range(align_len):
+         merged = dict(dataset_rows[idx])
+         merged[ROW_ID_COL] = idx
+         merged[MODEL_OUTPUT_COL] = prediction_rows[idx].get(prediction_column)
+         trimmed.append(merged)
+     return trimmed, ROW_ID_COL
+
+
+ def normalize_answer_text(value: Any) -> str | None:
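+     """Normalize an answer string for comparison.
+
+     Strips leading "answer is"-style labels, rewrites LaTeX commands and
+     Unicode math symbols to ASCII tokens, drops braces and backslashes,
+     collapses whitespace, and lowercases the result.
+     """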
+     if value is None:
+         return None
+     if isinstance(value, float) and math.isnan(value):
+         return None
+     text = str(value).strip()
+     if not text:
+         return None
+
+     text = unicodedata.normalize("NFKC", text)
+     text = strip_leading_label(text)
+     for src, dst in SYMBOL_REPLACEMENTS.items():
+         text = text.replace(src, dst)
+     for src, dst in LATEX_TEXT_REPLACEMENTS.items():
+         text = text.replace(src, dst)
+
+     text = re.sub(r"\\text\s*\{([^}]*)\}", r"\1", text)
+     text = re.sub(r"\\boxed\s*\{([^}]*)\}", r"\1", text)
+     text = text.replace("\\", "")
+     text = text.replace("{", "").replace("}", "")
+     text = text.replace("\r", "\n")
+     text = text.replace("\t", " ")
+     text = re.sub(r"\s+", " ", text).strip()
+     if not text:
+         return None
+     text = text.lower()
+     text = text.replace(" ", "")
+     return text
+
+
+ def strip_leading_label(text: str) -> str:
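+     """Remove a leading "final answer is"-style label, if present."""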
+     candidate = text
+     patterns = [
+         re.compile(r"(?i)^\s*(?:the\s+)?final\s+answer\s*(?:is)?\s*[:=\-]?\s*"),
+         re.compile(r"(?i)^\s*(?:the\s+)?answer\s*(?:is)?\s*[:=\-]?\s*"),
+         re.compile(r"(?i)^\s*ans\s*(?:is)?\s*[:=\-]?\s*"),
+         re.compile(r"(?i)^\s*(?:final\s+result|result)\s*(?:is)?\s*[:=\-]?\s*"),
+     ]
+     for pattern in patterns:
+         stripped = pattern.sub("", candidate, count=1)
+         if stripped != candidate:
+             return stripped.strip()
+     return candidate
+
+
+ def extract_candidate_answers(value: Any) -> list[str]:
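+     """Collect final-answer candidates from a raw model output.
+
+     Candidates are gathered in order from: answer-like JSON fields,
+     \\boxed{...} expressions (last occurrence first), '####' markers,
+     "final answer" phrases, a trailing equality, the last non-empty line,
+     and finally the whole text.
+     """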
+     if value is None:
+         return []
+     if isinstance(value, (dict, list)):
+         text = json.dumps(value, ensure_ascii=False)
+     else:
+         text = str(value)
+     text = text.strip()
+     if not text:
+         return []
+
+     candidates: list[str] = []
+
+     candidates.extend(_maybe_extract_from_json(text))
+
+     boxed_matches = BOXED_PATTERN.findall(text)
+     for match in reversed(boxed_matches):
+         stripped = match.strip()
+         if stripped:
+             candidates.append(stripped)
+
+     hash_matches = HASH_PATTERN.findall(text)
+     if hash_matches:
+         candidates.append(hash_matches[-1].strip())
+
+     for pattern in FINAL_ANSWER_PATTERNS:
+         for match in pattern.finditer(text):
+             snippet = match.group(1).strip()
+             if snippet:
+                 candidates.append(snippet)
+
+     equals_match = re.search(r"=\s*([^=\n]+)$", text)
+     if equals_match:
+         candidates.append(equals_match.group(1).strip())
+
+     lines = [ln.strip() for ln in text.splitlines() if ln.strip()]
+     if lines:
+         candidates.append(lines[-1])
+
+     candidates.append(text)
+
+     return _dedupe_preserving_order(candidates)
+
+
+ def _maybe_extract_from_json(text: str) -> list[str]:
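+     """If the text parses as JSON, collect values stored under answer-like keys."""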
+     stripped = text.strip()
+     if not stripped or stripped[0] not in "[{":
+         return []
+     try:
+         payload = json.loads(stripped)
+     except json.JSONDecodeError:
+         return []
+
+     values: list[str] = []
+
+     def _collect(obj: Any) -> None:
+         if isinstance(obj, dict):
+             for key, val in obj.items():
+                 if isinstance(val, (str, int, float)) and key.lower() in OUTPUT_KEYWORDS:
+                     values.append(str(val))
+                 elif isinstance(val, (dict, list)):
+                     _collect(val)
+         elif isinstance(obj, list):
+             for item in obj:
+                 _collect(item)
+
+     _collect(payload)
+     return values
+
+
+ def _dedupe_preserving_order(items: Iterable[str]) -> list[str]:
+     seen: set[str] = set()
+     deduped: list[str] = []
+     for item in items:
+         if not item:
+             continue
+         if item in seen:
+             continue
+         seen.add(item)
+         deduped.append(item)
+     return deduped
+
+
+ def compare_answers(gold: str, prediction: str) -> ComparisonResult:
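+     """Compare the gold answer against every extracted candidate.
+
+     A candidate counts as correct on normalized equality ("exact") or when
+     the normalized gold answer is contained in it ("substring_match").
+     """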
+     normalized_gold = normalize_answer_text(gold)
+     candidates = extract_candidate_answers(prediction)
+     if not normalized_gold:
+         return ComparisonResult(False, None, None, normalized_gold, None, candidates)
+
+     for candidate in candidates:
+         normalized_candidate = normalize_answer_text(candidate)
+         if not normalized_candidate:
+             continue
+         if normalized_candidate == normalized_gold:
+             return ComparisonResult(True, candidate.strip(), "exact", normalized_gold, normalized_candidate, candidates)
+         if normalized_gold in normalized_candidate:
+             return ComparisonResult(True, candidate.strip(), "substring_match", normalized_gold, normalized_candidate, candidates)
+
+     return ComparisonResult(False, None, None, normalized_gold, None, candidates)
+
+
+ def evaluate_rows(
+     records: Sequence[dict[str, Any]], *, final_answer_column: str, key_column: str
+ ) -> list[dict[str, Any]]:
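+     """Score each aligned row and build a per-row result record."""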
+     rows: list[dict[str, Any]] = []
+     for idx, row in enumerate(records):
+         gold = row.get(final_answer_column)
+         prediction = row.get(MODEL_OUTPUT_COL)
+         key = row.get(key_column, row.get(ROW_ID_COL, idx))
+
+         gold_text = str(gold)
+         has_prediction = not _is_missing(prediction)
+
+         if has_prediction:
+             comp = compare_answers(gold_text, str(prediction))
+         else:
+             comp = ComparisonResult(False, None, None, normalize_answer_text(gold_text), None, [])
+
+         key_value: Any = key
+         if isinstance(key_value, numbers.Integral):
+             key_value = int(key_value)
+         elif isinstance(key_value, numbers.Real) and float(key_value).is_integer():
+             key_value = int(key_value)
+
+         record = {
+             "row_key_column": key_column,
+             "row_key": key_value,
+             "final_answer": gold_text,
+             "model_output": str(prediction) if has_prediction else None,
+             "has_prediction": has_prediction,
+             "is_correct": has_prediction and comp.is_match,
+             "match_type": comp.match_type if has_prediction else None,
+             "matched_candidate": comp.matched_candidate,
+             "first_candidate": comp.candidates[0] if comp.candidates else None,
+             "candidate_count": len(comp.candidates),
+             "normalized_final_answer": comp.normalized_gold,
+             "normalized_candidate": comp.normalized_candidate,
+         }
+
+         if not has_prediction:
+             record["failure_reason"] = "no_prediction"
+         elif not comp.candidates:
+             record["failure_reason"] = "no_candidate"
+         elif not comp.is_match:
+             record["failure_reason"] = "mismatch"
+         else:
+             record["failure_reason"] = None
+
+         rows.append(record)
+     return rows
+
+
+ def _is_missing(value: Any) -> bool:
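+     """Treat None, NaN, and blank strings as missing values."""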
+     if value is None:
+         return True
+     if isinstance(value, float) and math.isnan(value):
+         return True
+     if isinstance(value, str) and not value.strip():
+         return True
+     return False
+
+
+ def write_jsonl(path: Path, rows: Sequence[dict[str, Any]]) -> None:
+     path.parent.mkdir(parents=True, exist_ok=True)
+     with path.open("w", encoding="utf-8") as handle:
+         for row in rows:
+             handle.write(json.dumps(row, ensure_ascii=False) + "\n")
+
+
+ def main() -> None:
+     args = parse_args()
+     dataset_rows = load_records(Path(args.dataset_path))
+     if not dataset_rows:
+         raise ValueError("Dataset file is empty")
+     if not any(args.final_answer_column in row for row in dataset_rows):
+         raise KeyError(
+             f"'{args.final_answer_column}' not found in dataset columns"
+         )
+     filtered_dataset = [
+         row for row in dataset_rows if not _is_missing(row.get(args.final_answer_column))
+     ]
+     if not filtered_dataset:
+         raise ValueError("Dataset does not contain rows with non-empty final answers")
+
+     prediction_rows = load_records(Path(args.predictions_path))
+     aligned_rows, key_column = align_tables(
+         filtered_dataset,
+         prediction_rows,
+         id_column=args.id_column,
+         prediction_column=args.prediction_column,
+     )
+
+     evaluation_rows = evaluate_rows(aligned_rows, final_answer_column=args.final_answer_column, key_column=key_column)
+
+     total_rows = len(evaluation_rows)
+     with_prediction = sum(1 for row in evaluation_rows if row["has_prediction"])
+     matches = sum(1 for row in evaluation_rows if row["is_correct"])
+     no_candidate = sum(1 for row in evaluation_rows if row["failure_reason"] == "no_candidate")
+     no_prediction = sum(1 for row in evaluation_rows if row["failure_reason"] == "no_prediction")
+
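+     # Accuracy is computed over rows that actually have predictions; the
+     # pass/fail reward below requires every evaluated row to match.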
+     accuracy = matches / with_prediction if with_prediction else 0.0
+
+     print(f"Evaluated rows: {total_rows}")
+     print(f"Rows with predictions: {with_prediction}")
+     print(f"Matches: {matches}")
+     print(f"Accuracy: {accuracy:.2%}")
+     if no_prediction:
+         print(f"Rows without predictions: {no_prediction}")
+     if no_candidate:
+         print(f"Rows where no final answer was extractable: {no_candidate}")
+
+     mismatches = [row for row in evaluation_rows if not row["is_correct"]]
+
+     if args.dump_results:
+         write_jsonl(Path(args.dump_results), evaluation_rows)
+         print(f"Wrote detailed results to {args.dump_results}")
+     if args.dump_mismatches:
+         write_jsonl(Path(args.dump_mismatches), mismatches)
+         print(f"Wrote mismatches to {args.dump_mismatches}")
+
+     reward = "pass" if total_rows and matches == total_rows else "fail"
+     print(f"Reward: {reward}")
+
+
+ if __name__ == "__main__":
+     main()