{"id": "pygod_000001", "task_type": "bugfix", "difficulty": "hard", "prompt": "Fix a subtle bug in a Python module used for CLI tooling.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must avoid global state, must handle Windows paths safely, must be streaming-friendly.\nDifficulty: hard. Avoid quadratic behavior on large inputs.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000002", "task_type": "bugfix", "difficulty": "hard", "prompt": "Fix a subtle bug in a Python module used for metrics aggregation.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must avoid global state, must include type hints, must handle Windows paths safely.\nDifficulty: hard. Avoid quadratic behavior on large inputs.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000003", "task_type": "bugfix", "difficulty": "hard", "prompt": "Fix a subtle bug in a Python module used for log processing.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must provide clear error messages, must include type hints, must be easy to extend.\nDifficulty: hard. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000004", "task_type": "bugfix", "difficulty": "hard", "prompt": "Fix a subtle bug in a Python module used for report generation.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must be streaming-friendly, must be memory-efficient, must include type hints, must handle Windows paths safely.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000005", "task_type": "bugfix", "difficulty": "easy", "prompt": "Fix a subtle bug in a Python module used for time series.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must provide clear error messages, stdlib-only, must include docstrings, must handle Windows paths safely.\nDifficulty: easy. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000006", "task_type": "bugfix", "difficulty": "easy", "prompt": "Fix a subtle bug in a Python module used for event dispatching.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must be easy to extend, must handle Windows paths safely.\nDifficulty: easy. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000007", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for caching layer.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: stdlib-only, must include docstrings, must be streaming-friendly, must avoid global state.\nDifficulty: expert. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000008", "task_type": "bugfix", "difficulty": "medium", "prompt": "Fix a subtle bug in a Python module used for ETL pipelines.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must be easy to extend, must provide clear error messages, stdlib-only, must avoid global state.\nDifficulty: medium. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000009", "task_type": "bugfix", "difficulty": "hard", "prompt": "Fix a subtle bug in a Python module used for rate limiting.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must include type hints, must include tests, stdlib-only.\nDifficulty: hard. Guarantee deterministic output ordering.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000010", "task_type": "bugfix", "difficulty": "easy", "prompt": "Fix a subtle bug in a Python module used for file synchronization.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must include docstrings, must be streaming-friendly, stdlib-only.\nDifficulty: easy. Support both Windows and POSIX paths correctly.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000011", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for data modeling.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: stdlib-only, must avoid global state, must be streaming-friendly, must handle Windows paths safely.\nDifficulty: expert. Guarantee deterministic output ordering.", "response": "Deliverable: implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000012", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for data modeling.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must include type hints, must be memory-efficient, must handle Windows paths safely.\nDifficulty: expert. Provide helpful exceptions with actionable messages.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000013", "task_type": "bugfix", "difficulty": "medium", "prompt": "Fix a subtle bug in a Python module used for file synchronization.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must be easy to extend, must avoid global state.\nDifficulty: medium. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000014", "task_type": "bugfix", "difficulty": "hard", "prompt": "Fix a subtle bug in a Python module used for CLI tooling.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must handle Windows paths safely, must avoid global state.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000015", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for CLI tooling.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must be easy to extend, must handle Windows paths safely, stdlib-only, must provide clear error messages.\nDifficulty: expert. Support both Windows and POSIX paths correctly.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000016", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for event dispatching.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must handle Windows paths safely, must avoid global state, must provide clear error messages, must include type hints.\nDifficulty: expert. Ensure the API is testable via dependency injection.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000017", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for parsing.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must provide clear error messages, must be streaming-friendly.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000018", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for telemetry ingestion.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: stdlib-only, must handle Windows paths safely, must include type hints.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000019", "task_type": "bugfix", "difficulty": "expert", "prompt": "Fix a subtle bug in a Python module used for parsing.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: stdlib-only, must include docstrings.\nDifficulty: expert. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000020", "task_type": "refactor", "difficulty": "medium", "prompt": "Refactor a Python module used for report generation into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must be memory-efficient, must be streaming-friendly.\nDifficulty: medium. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: API + reference implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000021", "task_type": "refactor", "difficulty": "medium", "prompt": "Refactor a Python module used for data modeling into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must handle Windows paths safely, must be streaming-friendly, must include type hints, must provide clear error messages.\nDifficulty: medium. Include a small CLI example if relevant.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000022", "task_type": "refactor", "difficulty": "easy", "prompt": "Refactor a Python module used for task scheduling into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: stdlib-only, must include tests, must include docstrings.\nDifficulty: easy. Avoid quadratic behavior on large inputs.", "response": "Deliverable: implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000023", "task_type": "refactor", "difficulty": "expert", "prompt": "Refactor a Python module used for report generation into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: stdlib-only, must include tests, must be memory-efficient, must include type hints.\nDifficulty: expert. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000024", "task_type": "refactor", "difficulty": "easy", "prompt": "Refactor a Python module used for report generation into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must avoid global state, must be streaming-friendly, must handle Windows paths safely.\nDifficulty: easy. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: API + reference implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000025", "task_type": "refactor", "difficulty": "hard", "prompt": "Refactor a Python module used for metrics aggregation into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must include type hints, stdlib-only.\nDifficulty: hard. Ensure the API is testable via dependency injection.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000026", "task_type": "refactor", "difficulty": "medium", "prompt": "Refactor a Python module used for file synchronization into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must include type hints, must include tests.\nDifficulty: medium. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000027", "task_type": "refactor", "difficulty": "medium", "prompt": "Refactor a Python module used for ETL pipelines into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must be memory-efficient, must avoid global state, must handle Windows paths safely.\nDifficulty: medium. Include a small CLI example if relevant.", "response": "Deliverable: API + reference implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000028", "task_type": "refactor", "difficulty": "expert", "prompt": "Refactor a Python module used for file synchronization into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must include type hints, must provide clear error messages.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000029", "task_type": "refactor", "difficulty": "hard", "prompt": "Refactor a Python module used for rate limiting into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must avoid global state, stdlib-only, must include docstrings, must provide clear error messages.\nDifficulty: hard. Provide helpful exceptions with actionable messages.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000030", "task_type": "refactor", "difficulty": "easy", "prompt": "Refactor a Python module used for metrics aggregation into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must avoid global state, must be streaming-friendly, must include docstrings, must include type hints.\nDifficulty: easy. Provide helpful exceptions with actionable messages.", "response": "Deliverable: implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000031", "task_type": "refactor", "difficulty": "expert", "prompt": "Refactor a Python module used for event dispatching into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must handle Windows paths safely, stdlib-only, must provide clear error messages.\nDifficulty: expert. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000032", "task_type": "refactor", "difficulty": "easy", "prompt": "Refactor a Python module used for ETL pipelines into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must avoid global state, must provide clear error messages, must include docstrings.\nDifficulty: easy. Avoid quadratic behavior on large inputs.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000033", "task_type": "refactor", "difficulty": "easy", "prompt": "Refactor a Python module used for task scheduling into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must include tests, must be memory-efficient, must include type hints, must include docstrings.\nDifficulty: easy. Guarantee deterministic output ordering.", "response": "Deliverable: implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000034", "task_type": "refactor", "difficulty": "medium", "prompt": "Refactor a Python module used for time series into a clean, extensible design.\nRequirements: outline the new architecture briefly, then provide the refactored code and tests.\nConstraints: must provide clear error messages, must include type hints.\nDifficulty: medium. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "architecture", "clean-code"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000035", "task_type": "tests", "difficulty": "expert", "prompt": "Write a thorough test suite for a Python component used for log processing.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must include docstrings, must provide clear error messages, must be streaming-friendly.\nDifficulty: expert. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: corrected code and verification tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000036", "task_type": "tests", "difficulty": "hard", "prompt": "Write a thorough test suite for a Python component used for ETL pipelines.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must be easy to extend, must handle Windows paths safely, must include docstrings, stdlib-only.\nDifficulty: hard. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000037", "task_type": "tests", "difficulty": "hard", "prompt": "Write a thorough test suite for a Python component used for file synchronization.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must include docstrings, stdlib-only.\nDifficulty: hard. Provide helpful exceptions with actionable messages.", "response": "Deliverable: implementation + tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000038", "task_type": "tests", "difficulty": "expert", "prompt": "Write a thorough test suite for a Python component used for parsing.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must avoid global state, stdlib-only.\nDifficulty: expert. Provide helpful exceptions with actionable messages.", "response": "Deliverable: implementation + tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000039", "task_type": "tests", "difficulty": "expert", "prompt": "Write a thorough test suite for a Python component used for report generation.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must handle Windows paths safely, must include docstrings.\nDifficulty: expert. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000040", "task_type": "tests", "difficulty": "medium", "prompt": "Write a thorough test suite for a Python component used for telemetry ingestion.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must provide clear error messages, must handle Windows paths safely, must include docstrings.\nDifficulty: medium. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: production-ready reference solution.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000041", "task_type": "tests", "difficulty": "medium", "prompt": "Write a thorough test suite for a Python component used for metrics aggregation.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must provide clear error messages, must avoid global state.\nDifficulty: medium. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000042", "task_type": "tests", "difficulty": "medium", "prompt": "Write a thorough test suite for a Python component used for rate limiting.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must be easy to extend, must handle Windows paths safely, must include type hints.\nDifficulty: medium. Ensure the API is testable via dependency injection.", "response": "Deliverable: production-ready reference solution.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000043", "task_type": "tests", "difficulty": "medium", "prompt": "Write a thorough test suite for a Python component used for task scheduling.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must include docstrings, must include type hints, must be streaming-friendly.\nDifficulty: medium. Avoid quadratic behavior on large inputs.", "response": "Deliverable: corrected code and verification tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000044", "task_type": "tests", "difficulty": "medium", "prompt": "Write a thorough test suite for a Python component used for metrics aggregation.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must include tests, must provide clear error messages.\nDifficulty: medium. Guarantee deterministic output ordering.", "response": "Deliverable: implementation + tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000045", "task_type": "tests", "difficulty": "easy", "prompt": "Write a thorough test suite for a Python component used for ETL pipelines.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must handle Windows paths safely, must include type hints.\nDifficulty: easy. Ensure the API is testable via dependency injection.", "response": "Deliverable: production-ready reference solution.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000046", "task_type": "tests", "difficulty": "easy", "prompt": "Write a thorough test suite for a Python component used for time series.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must include docstrings, must avoid global state, must provide clear error messages, stdlib-only.\nDifficulty: easy. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000047", "task_type": "tests", "difficulty": "hard", "prompt": "Write a thorough test suite for a Python component used for file synchronization.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must include docstrings, stdlib-only, must be memory-efficient, must provide clear error messages.\nDifficulty: hard. Ensure the API is testable via dependency injection.", "response": "Deliverable: corrected code and verification tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000048", "task_type": "tests", "difficulty": "hard", "prompt": "Write a thorough test suite for a Python component used for validation.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: stdlib-only, must be streaming-friendly, must avoid global state.\nDifficulty: hard. Ensure the API is testable via dependency injection.", "response": "Deliverable: corrected code and verification tests.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000049", "task_type": "tests", "difficulty": "easy", "prompt": "Write a thorough test suite for a Python component used for rate limiting.\nRequirements: cover edge cases, invalid inputs, and behavior invariants.\nConstraints: must be streaming-friendly, stdlib-only, must handle Windows paths safely, must avoid global state.\nDifficulty: easy. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: production-ready reference solution.\n```python\n# tests (pytest-style) for a module providing: normalize(), parse_lines(), compute_stats()\nimport math\nimport pytest\n\ndef test_normalize_idempotent(module_under_test):\n    x = \"  A\\tB  \"\n    assert module_under_test.normalize(module_under_test.normalize(x)) == module_under_test.normalize(x)\n\ndef test_parse_lines_rejects_invalid(module_under_test):\n    with pytest.raises(ValueError):\n        module_under_test.parse_lines([\"\", \"???\", None])  # type: ignore[arg-type]\n\ndef test_compute_stats_basic(module_under_test):\n    data = [1, 2, 3, 4]\n    s = module_under_test.compute_stats(data)\n    assert s.count == 4\n    assert s.total == 10\n    assert math.isclose(s.mean, 2.5)\n```\n", "metadata": {"tags": ["python", "pytest_style", "unittesting"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000050", "task_type": "algorithms", "difficulty": "expert", "prompt": "Design and implement an algorithm for task scheduling.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must provide clear error messages, must handle Windows paths safely, must include tests.\nDifficulty: expert. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000051", "task_type": "algorithms", "difficulty": "easy", "prompt": "Design and implement an algorithm for report generation.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must provide clear error messages, must be memory-efficient.\nDifficulty: easy. Provide helpful exceptions with actionable messages.", "response": "Deliverable: API + reference implementation + tests.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000052", "task_type": "algorithms", "difficulty": "hard", "prompt": "Design and implement an algorithm for file synchronization.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must be easy to extend, must avoid global state, must provide clear error messages, must handle Windows paths safely.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: production-ready reference solution.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000053", "task_type": "algorithms", "difficulty": "expert", "prompt": "Design and implement an algorithm for parsing.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must avoid global state, must include tests, must include type hints, must include docstrings.\nDifficulty: expert. Ensure the API is testable via dependency injection.", "response": "Deliverable: production-ready reference solution.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000054", "task_type": "algorithms", "difficulty": "hard", "prompt": "Design and implement an algorithm for rate limiting.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must include docstrings, must be streaming-friendly, must include type hints, stdlib-only.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: production-ready reference solution.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000055", "task_type": "algorithms", "difficulty": "medium", "prompt": "Design and implement an algorithm for data modeling.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must avoid global state, must include tests, must include type hints, must handle Windows paths safely.\nDifficulty: medium. Guarantee deterministic output ordering.", "response": "Deliverable: API + reference implementation + tests.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000056", "task_type": "algorithms", "difficulty": "hard", "prompt": "Design and implement an algorithm for time series.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: stdlib-only, must be streaming-friendly, must include type hints, must be memory-efficient.\nDifficulty: hard. Provide helpful exceptions with actionable messages.", "response": "Deliverable: API + reference implementation + tests.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000057", "task_type": "algorithms", "difficulty": "expert", "prompt": "Design and implement an algorithm for log processing.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: stdlib-only, must be memory-efficient.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: production-ready reference solution.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000058", "task_type": "algorithms", "difficulty": "medium", "prompt": "Design and implement an algorithm for telemetry ingestion.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: stdlib-only, must provide clear error messages, must include docstrings, must be streaming-friendly.\nDifficulty: medium. Support both Windows and POSIX paths correctly.", "response": "Deliverable: implementation + tests.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000059", "task_type": "algorithms", "difficulty": "hard", "prompt": "Design and implement an algorithm for configuration loading.\nRequirements: define inputs/outputs, implement solution with complexity analysis, and include tests.\nConstraints: must handle Windows paths safely, must be memory-efficient, must be streaming-friendly, must be easy to extend.\nDifficulty: hard. Provide helpful exceptions with actionable messages.", "response": "Deliverable: corrected code and verification tests.\nProblem: compute rolling sums over events (ts:int,value:int) with window W seconds. Events may be out-of-order.\n\nComplexity: sorting O(n log n); window maintenance O(n); memory O(k).\n\n```python\nfrom __future__ import annotations\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Tuple\n\n@dataclass(frozen=True, slots=True)\nclass Event:\n    ts: int\n    value: int\n\ndef rolling_sum(events: Iterable[Event], window_s: int) -> List[Tuple[int,int]]:\n    if window_s < 0:\n        raise ValueError(\"window_s must be >= 0\")\n    evs = sorted(events, key=lambda e: e.ts)\n    q: deque[Event] = deque()\n    s = 0\n    out: List[Tuple[int,int]] = []\n    for e in evs:\n        q.append(e)\n        s += e.value\n        cutoff = e.ts - window_s\n        while q and q[0].ts < cutoff:\n            old = q.popleft()\n            s -= old.value\n        out.append((e.ts, s))\n    return out\n```\n\n```python\nimport pytest\nfrom module_under_test import Event, rolling_sum\n\ndef test_rolling_sum():\n    ev = [Event(10,1), Event(11,2), Event(20,5)]\n    assert rolling_sum(ev, 5) == [(10,1),(11,3),(20,5)]\n```\n", "metadata": {"tags": ["python", "complexity", "correctness"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000060", "task_type": "typing", "difficulty": "hard", "prompt": "Improve typing and robustness of a Python module used for event dispatching.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must avoid global state, must handle Windows paths safely, must include tests, stdlib-only.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000061", "task_type": "typing", "difficulty": "easy", "prompt": "Improve typing and robustness of a Python module used for time series.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must avoid global state, must be memory-efficient, must be easy to extend.\nDifficulty: easy. Include a small CLI example if relevant.", "response": "Deliverable: implementation + tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000062", "task_type": "typing", "difficulty": "medium", "prompt": "Improve typing and robustness of a Python module used for log processing.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must include docstrings, must include tests, must avoid global state, must provide clear error messages.\nDifficulty: medium. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000063", "task_type": "typing", "difficulty": "hard", "prompt": "Improve typing and robustness of a Python module used for report generation.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must include type hints, stdlib-only.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000064", "task_type": "typing", "difficulty": "hard", "prompt": "Improve typing and robustness of a Python module used for report generation.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must be streaming-friendly, must be memory-efficient.\nDifficulty: hard. Support both Windows and POSIX paths correctly.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000065", "task_type": "typing", "difficulty": "expert", "prompt": "Improve typing and robustness of a Python module used for metrics aggregation.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: stdlib-only, must provide clear error messages, must be memory-efficient.\nDifficulty: expert. Support both Windows and POSIX paths correctly.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000066", "task_type": "typing", "difficulty": "expert", "prompt": "Improve typing and robustness of a Python module used for event dispatching.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must include tests, must include docstrings, stdlib-only.\nDifficulty: expert. Provide helpful exceptions with actionable messages.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000067", "task_type": "typing", "difficulty": "expert", "prompt": "Improve typing and robustness of a Python module used for parsing.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must include docstrings, stdlib-only, must be memory-efficient, must handle Windows paths safely.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000068", "task_type": "typing", "difficulty": "medium", "prompt": "Improve typing and robustness of a Python module used for task scheduling.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must be streaming-friendly, must provide clear error messages, must include tests, must be memory-efficient.\nDifficulty: medium. Support both Windows and POSIX paths correctly.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000069", "task_type": "typing", "difficulty": "expert", "prompt": "Improve typing and robustness of a Python module used for report generation.\nRequirements: provide type-safe APIs (generics/protocols where appropriate) and include tests.\nConstraints: must include docstrings, must provide clear error messages, must avoid global state, must be memory-efficient.\nDifficulty: expert. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Callable, Generic, Iterable, Protocol, TypeVar\n\nT = TypeVar(\"T\")\nR = TypeVar(\"R\")\n\nclass Mapper(Protocol[T, R]):\n    def __call__(self, x: T) -> R: ...\n\n@dataclass(frozen=True, slots=True)\nclass Batch(Generic[T]):\n    items: tuple[T, ...]\n\ndef map_batch(batch: Batch[T], fn: Mapper[T, R]) -> Batch[R]:\n    return Batch(items=tuple(fn(x) for x in batch.items))\n\ndef chunked(xs: Iterable[T], size: int) -> Iterable[Batch[T]]:\n    if size <= 0:\n        raise ValueError(\"size must be > 0\")\n    buf: list[T] = []\n    for x in xs:\n        buf.append(x)\n        if len(buf) == size:\n            yield Batch(items=tuple(buf))\n            buf.clear()\n    if buf:\n        yield Batch(items=tuple(buf))\n```\n\n```python\nimport pytest\nfrom module_under_test import Batch, map_batch, chunked\n\ndef test_map_batch():\n    b = Batch(items=(1,2,3))\n    assert map_batch(b, lambda x: x*2).items == (2,4,6)\n\ndef test_chunked():\n    out = list(chunked([1,2,3,4,5], 2))\n    assert [b.items for b in out] == [(1,2),(3,4),(5,)]\n```\n", "metadata": {"tags": ["python", "mypy", "typing"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000070", "task_type": "performance", "difficulty": "hard", "prompt": "Optimize a Python workflow for metrics aggregation.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must avoid global state, must handle Windows paths safely, must include type hints, must provide clear error messages.\nDifficulty: hard. Include a small CLI example if relevant.", "response": "Deliverable: production-ready reference solution.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000071", "task_type": "performance", "difficulty": "medium", "prompt": "Optimize a Python workflow for data modeling.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must include tests, must be easy to extend, must be memory-efficient.\nDifficulty: medium. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000072", "task_type": "performance", "difficulty": "easy", "prompt": "Optimize a Python workflow for task scheduling.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must include type hints, stdlib-only, must provide clear error messages.\nDifficulty: easy. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000073", "task_type": "performance", "difficulty": "medium", "prompt": "Optimize a Python workflow for time series.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must avoid global state, must include type hints, stdlib-only, must provide clear error messages.\nDifficulty: medium. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: production-ready reference solution.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000074", "task_type": "performance", "difficulty": "medium", "prompt": "Optimize a Python workflow for log processing.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must include docstrings, must be easy to extend, stdlib-only.\nDifficulty: medium. Guarantee deterministic output ordering.", "response": "Deliverable: implementation + tests.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000075", "task_type": "performance", "difficulty": "hard", "prompt": "Optimize a Python workflow for configuration loading.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must include docstrings, must provide clear error messages, must be streaming-friendly.\nDifficulty: hard. Ensure the API is testable via dependency injection.", "response": "Deliverable: API + reference implementation + tests.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000076", "task_type": "performance", "difficulty": "hard", "prompt": "Optimize a Python workflow for metrics aggregation.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must be streaming-friendly, must include type hints, stdlib-only.\nDifficulty: hard. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: production-ready reference solution.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000077", "task_type": "performance", "difficulty": "hard", "prompt": "Optimize a Python workflow for rate limiting.\nRequirements: provide an optimized implementation and a small benchmark harness.\nConstraints: must include tests, must be memory-efficient.\nDifficulty: hard. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\nStrategy:\n- Avoid repeated parsing; precompile regex if needed; use local variables; stream processing.\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport time\n\n@dataclass(frozen=True, slots=True)\nclass Stats:\n    count: int\n    total: int\n\ndef compute_stats_stream(values: Iterable[int]) -> Stats:\n    c = 0\n    t = 0\n    for v in values:\n        c += 1\n        t += v\n    return Stats(c, t)\n\ndef bench(n: int = 1_000_00) -> float:\n    data = range(n)\n    t0 = time.perf_counter()\n    _ = compute_stats_stream(data)\n    return time.perf_counter() - t0\n```\n\n```python\nfrom module_under_test import bench\ndef test_bench_runs_fast_enough():\n    # Sanity: should run quickly on typical machines; adjust threshold for CI.\n    assert bench(50_000) < 0.5\n```\n", "metadata": {"tags": ["python", "profiling", "optimization"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000078", "task_type": "concurrency", "difficulty": "easy", "prompt": "Implement a concurrent Python solution for configuration loading.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: must be streaming-friendly, must include type hints, must include tests.\nDifficulty: easy. Provide helpful exceptions with actionable messages.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000079", "task_type": "concurrency", "difficulty": "expert", "prompt": "Implement a concurrent Python solution for rate limiting.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: stdlib-only, must avoid global state, must be memory-efficient.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000080", "task_type": "concurrency", "difficulty": "expert", "prompt": "Implement a concurrent Python solution for event dispatching.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: must be streaming-friendly, must avoid global state, must include type hints, must provide clear error messages.\nDifficulty: expert. Support both Windows and POSIX paths correctly.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000081", "task_type": "concurrency", "difficulty": "medium", "prompt": "Implement a concurrent Python solution for validation.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: must include docstrings, must handle Windows paths safely, must be easy to extend, must avoid global state.\nDifficulty: medium. Include a small CLI example if relevant.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000082", "task_type": "concurrency", "difficulty": "medium", "prompt": "Implement a concurrent Python solution for time series.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: must be streaming-friendly, must be easy to extend, must include tests, must provide clear error messages.\nDifficulty: medium. Ensure the API is testable via dependency injection.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000083", "task_type": "concurrency", "difficulty": "easy", "prompt": "Implement a concurrent Python solution for validation.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: must include type hints, must be streaming-friendly, must include tests.\nDifficulty: easy. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000084", "task_type": "concurrency", "difficulty": "hard", "prompt": "Implement a concurrent Python solution for CLI tooling.\nRequirements: choose appropriate concurrency model, handle cancellation/timeouts, include tests.\nConstraints: must avoid global state, must include docstrings, must include tests.\nDifficulty: hard. Include a small CLI example if relevant.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\nimport asyncio\nfrom dataclasses import dataclass\nfrom typing import Awaitable, Callable, Iterable, Optional\n\n@dataclass(frozen=True, slots=True)\nclass JobResult:\n    ok: bool\n    value: Optional[int]\n    error: Optional[str]\n\nasync def run_bounded(\n    factories: Iterable[Callable[[], Awaitable[int]]],\n    *,\n    limit: int = 10,\n    timeout_s: float = 5.0,\n) -> list[JobResult]:\n    if limit <= 0:\n        raise ValueError(\"limit must be > 0\")\n    sem = asyncio.Semaphore(limit)\n    results: list[JobResult] = []\n\n    async def one(factory: Callable[[], Awaitable[int]]) -> None:\n        async with sem:\n            try:\n                v = await asyncio.wait_for(factory(), timeout=timeout_s)\n                results.append(JobResult(True, v, None))\n            except Exception as e:\n                results.append(JobResult(False, None, str(e)))\n\n    tasks = [asyncio.create_task(one(f)) for f in factories]\n    try:\n        await asyncio.gather(*tasks)\n    finally:\n        for t in tasks:\n            if not t.done():\n                t.cancel()\n    return results\n```\n\n```python\nimport asyncio\nimport pytest\nfrom module_under_test import run_bounded\n\n@pytest.mark.asyncio\nasync def test_run_bounded_collects():\n    async def work(x: int) -> int:\n        await asyncio.sleep(0.01)\n        return x\n    fs = [(lambda i=i: work(i)) for i in range(30)]\n    out = await run_bounded(fs, limit=5, timeout_s=1.0)\n    assert len(out) == 30\n    assert all(r.ok for r in out)\n```\n", "metadata": {"tags": ["python", "asyncio", "threading"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000085", "task_type": "api_design", "difficulty": "medium", "prompt": "Design a small, production-grade Python API for data modeling.\nRequirements: define data models and interfaces, implement reference version, include tests.\nConstraints: must include docstrings, stdlib-only, must handle Windows paths safely.\nDifficulty: medium. Avoid quadratic behavior on large inputs.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "interfaces", "dataclasses"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000086", "task_type": "api_design", "difficulty": "hard", "prompt": "Design a small, production-grade Python API for task scheduling.\nRequirements: define data models and interfaces, implement reference version, include tests.\nConstraints: must avoid global state, must be memory-efficient, must include tests, must handle Windows paths safely.\nDifficulty: hard. Guarantee deterministic output ordering.", "response": "Deliverable: corrected code and verification tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "interfaces", "dataclasses"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000087", "task_type": "api_design", "difficulty": "hard", "prompt": "Design a small, production-grade Python API for file synchronization.\nRequirements: define data models and interfaces, implement reference version, include tests.\nConstraints: must be streaming-friendly, must include docstrings.\nDifficulty: hard. Include a small CLI example if relevant.", "response": "Deliverable: API + reference implementation + tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "interfaces", "dataclasses"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000088", "task_type": "api_design", "difficulty": "expert", "prompt": "Design a small, production-grade Python API for ETL pipelines.\nRequirements: define data models and interfaces, implement reference version, include tests.\nConstraints: must be streaming-friendly, must handle Windows paths safely, stdlib-only.\nDifficulty: expert. Guarantee deterministic output ordering.", "response": "Deliverable: corrected code and verification tests.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "interfaces", "dataclasses"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000089", "task_type": "api_design", "difficulty": "easy", "prompt": "Design a small, production-grade Python API for validation.\nRequirements: define data models and interfaces, implement reference version, include tests.\nConstraints: stdlib-only, must include tests, must include type hints, must handle Windows paths safely.\nDifficulty: easy. Support both Windows and POSIX paths correctly.", "response": "Deliverable: production-ready reference solution.\nArchitecture:\n- Public facade class coordinating concerns (parsing, validation, persistence)\n- Small pure functions for testability\n- No global state; dependency injection for filesystem I/O\n\n```python\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Protocol, Iterable\nimport json\n\nclass Store(Protocol):\n    def write_text(self, path: Path, data: str) -> None: ...\n\nclass FSStore:\n    def write_text(self, path: Path, data: str) -> None:\n        path.parent.mkdir(parents=True, exist_ok=True)\n        path.write_text(data, encoding=\"utf-8\")\n\n@dataclass(frozen=True, slots=True)\nclass Item:\n    name: str\n    qty: int\n\ndef parse_items(lines: Iterable[str]) -> list[Item]:\n    out: list[Item] = []\n    for i, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        obj = json.loads(s)\n        if not isinstance(obj, dict):\n            raise ValueError(f\"line {i}: expected object\")\n        name = obj.get(\"name\")\n        qty = obj.get(\"qty\")\n        if not isinstance(name, str) or not name:\n            raise ValueError(f\"line {i}: name must be str\")\n        if not isinstance(qty, int) or qty < 0:\n            raise ValueError(f\"line {i}: qty must be non-negative int\")\n        out.append(Item(name, qty))\n    return out\n\nclass InventoryWriter:\n    def __init__(self, base_dir: Path, store: Store | None = None) -> None:\n        self._base = base_dir\n        self._store: Store = store or FSStore()\n\n    def write(self, rel_path: str, items: list[Item]) -> Path:\n        dst = (self._base / rel_path).resolve()\n        base = self._base.resolve()\n        if dst != base and base not in dst.parents:\n            raise ValueError(\"unsafe path traversal\")\n        payload = json.dumps([item.__dict__ for item in items], ensure_ascii=False)\n        self._store.write_text(dst, payload)\n        return dst\n```\n\n```python\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import InventoryWriter, parse_items\n\ndef test_parse_items_valid():\n    lines = [json.dumps({\"name\":\"x\",\"qty\":1})]\n    assert parse_items(lines)[0].name == \"x\"\n\ndef test_writer_blocks_traversal(tmp_path: Path):\n    w = InventoryWriter(tmp_path)\n    with pytest.raises(ValueError):\n        w.write(\"../evil.json\", [])\n```\n", "metadata": {"tags": ["python", "interfaces", "dataclasses"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000090", "task_type": "packaging", "difficulty": "expert", "prompt": "Create a small, production-ready Python package for file synchronization.\nRequirements: include module code, CLI entrypoint, logging, and a minimal pyproject.toml.\nConstraints: must include docstrings, must handle Windows paths safely, must avoid global state, must include type hints.\nDifficulty: expert. Guarantee deterministic output ordering.", "response": "Deliverable: production-ready reference solution.\nPackage layout:\n- mypkg/core.py\n- mypkg/cli.py\n- pyproject.toml\n\n```python\n# mypkg/core.py\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport logging\n\nlog = logging.getLogger(__name__)\n\n@dataclass(frozen=True, slots=True)\nclass Result:\n    total: int\n    count: int\n\ndef aggregate(values: Iterable[int]) -> Result:\n    total = 0\n    count = 0\n    for v in values:\n        if not isinstance(v, int):\n            raise TypeError(\"values must be ints\")\n        total += v\n        count += 1\n    log.debug(\"aggregate count=%s total=%s\", count, total)\n    return Result(total, count)\n```\n\n```python\n# mypkg/cli.py\nfrom __future__ import annotations\nimport argparse\nimport logging\nimport sys\nfrom .core import aggregate\n\ndef main(argv: list[str] | None = None) -> int:\n    p = argparse.ArgumentParser(prog=\"mypkg\")\n    p.add_argument(\"--log-level\", default=\"WARNING\")\n    args = p.parse_args(argv)\n    logging.basicConfig(level=getattr(logging, args.log_level.upper(), logging.WARNING))\n    data: list[int] = []\n    for line in sys.stdin:\n        line = line.strip()\n        if line:\n            data.append(int(line))\n    r = aggregate(data)\n    print(f\"count={r.count} total={r.total}\")\n    return 0\n```\n\n```toml\n# pyproject.toml\n[project]\nname = \"mypkg\"\nversion = \"0.1.0\"\nrequires-python = \">=3.10\"\n\n[project.scripts]\nmypkg = \"mypkg.cli:main\"\n```\n", "metadata": {"tags": ["python", "cli", "pyproject"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000091", "task_type": "packaging", "difficulty": "medium", "prompt": "Create a small, production-ready Python package for log processing.\nRequirements: include module code, CLI entrypoint, logging, and a minimal pyproject.toml.\nConstraints: must include docstrings, must handle Windows paths safely, must include tests, must be streaming-friendly.\nDifficulty: medium. Avoid quadratic behavior on large inputs.", "response": "Deliverable: corrected code and verification tests.\nPackage layout:\n- mypkg/core.py\n- mypkg/cli.py\n- pyproject.toml\n\n```python\n# mypkg/core.py\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport logging\n\nlog = logging.getLogger(__name__)\n\n@dataclass(frozen=True, slots=True)\nclass Result:\n    total: int\n    count: int\n\ndef aggregate(values: Iterable[int]) -> Result:\n    total = 0\n    count = 0\n    for v in values:\n        if not isinstance(v, int):\n            raise TypeError(\"values must be ints\")\n        total += v\n        count += 1\n    log.debug(\"aggregate count=%s total=%s\", count, total)\n    return Result(total, count)\n```\n\n```python\n# mypkg/cli.py\nfrom __future__ import annotations\nimport argparse\nimport logging\nimport sys\nfrom .core import aggregate\n\ndef main(argv: list[str] | None = None) -> int:\n    p = argparse.ArgumentParser(prog=\"mypkg\")\n    p.add_argument(\"--log-level\", default=\"WARNING\")\n    args = p.parse_args(argv)\n    logging.basicConfig(level=getattr(logging, args.log_level.upper(), logging.WARNING))\n    data: list[int] = []\n    for line in sys.stdin:\n        line = line.strip()\n        if line:\n            data.append(int(line))\n    r = aggregate(data)\n    print(f\"count={r.count} total={r.total}\")\n    return 0\n```\n\n```toml\n# pyproject.toml\n[project]\nname = \"mypkg\"\nversion = \"0.1.0\"\nrequires-python = \">=3.10\"\n\n[project.scripts]\nmypkg = \"mypkg.cli:main\"\n```\n", "metadata": {"tags": ["python", "cli", "pyproject"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000092", "task_type": "packaging", "difficulty": "easy", "prompt": "Create a small, production-ready Python package for event dispatching.\nRequirements: include module code, CLI entrypoint, logging, and a minimal pyproject.toml.\nConstraints: must provide clear error messages, must be streaming-friendly.\nDifficulty: easy. Avoid quadratic behavior on large inputs.", "response": "Deliverable: implementation + tests.\nPackage layout:\n- mypkg/core.py\n- mypkg/cli.py\n- pyproject.toml\n\n```python\n# mypkg/core.py\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport logging\n\nlog = logging.getLogger(__name__)\n\n@dataclass(frozen=True, slots=True)\nclass Result:\n    total: int\n    count: int\n\ndef aggregate(values: Iterable[int]) -> Result:\n    total = 0\n    count = 0\n    for v in values:\n        if not isinstance(v, int):\n            raise TypeError(\"values must be ints\")\n        total += v\n        count += 1\n    log.debug(\"aggregate count=%s total=%s\", count, total)\n    return Result(total, count)\n```\n\n```python\n# mypkg/cli.py\nfrom __future__ import annotations\nimport argparse\nimport logging\nimport sys\nfrom .core import aggregate\n\ndef main(argv: list[str] | None = None) -> int:\n    p = argparse.ArgumentParser(prog=\"mypkg\")\n    p.add_argument(\"--log-level\", default=\"WARNING\")\n    args = p.parse_args(argv)\n    logging.basicConfig(level=getattr(logging, args.log_level.upper(), logging.WARNING))\n    data: list[int] = []\n    for line in sys.stdin:\n        line = line.strip()\n        if line:\n            data.append(int(line))\n    r = aggregate(data)\n    print(f\"count={r.count} total={r.total}\")\n    return 0\n```\n\n```toml\n# pyproject.toml\n[project]\nname = \"mypkg\"\nversion = \"0.1.0\"\nrequires-python = \">=3.10\"\n\n[project.scripts]\nmypkg = \"mypkg.cli:main\"\n```\n", "metadata": {"tags": ["python", "cli", "pyproject"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000093", "task_type": "packaging", "difficulty": "expert", "prompt": "Create a small, production-ready Python package for rate limiting.\nRequirements: include module code, CLI entrypoint, logging, and a minimal pyproject.toml.\nConstraints: must avoid global state, must handle Windows paths safely, must be streaming-friendly.\nDifficulty: expert. Provide helpful exceptions with actionable messages.", "response": "Deliverable: production-ready reference solution.\nPackage layout:\n- mypkg/core.py\n- mypkg/cli.py\n- pyproject.toml\n\n```python\n# mypkg/core.py\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport logging\n\nlog = logging.getLogger(__name__)\n\n@dataclass(frozen=True, slots=True)\nclass Result:\n    total: int\n    count: int\n\ndef aggregate(values: Iterable[int]) -> Result:\n    total = 0\n    count = 0\n    for v in values:\n        if not isinstance(v, int):\n            raise TypeError(\"values must be ints\")\n        total += v\n        count += 1\n    log.debug(\"aggregate count=%s total=%s\", count, total)\n    return Result(total, count)\n```\n\n```python\n# mypkg/cli.py\nfrom __future__ import annotations\nimport argparse\nimport logging\nimport sys\nfrom .core import aggregate\n\ndef main(argv: list[str] | None = None) -> int:\n    p = argparse.ArgumentParser(prog=\"mypkg\")\n    p.add_argument(\"--log-level\", default=\"WARNING\")\n    args = p.parse_args(argv)\n    logging.basicConfig(level=getattr(logging, args.log_level.upper(), logging.WARNING))\n    data: list[int] = []\n    for line in sys.stdin:\n        line = line.strip()\n        if line:\n            data.append(int(line))\n    r = aggregate(data)\n    print(f\"count={r.count} total={r.total}\")\n    return 0\n```\n\n```toml\n# pyproject.toml\n[project]\nname = \"mypkg\"\nversion = \"0.1.0\"\nrequires-python = \">=3.10\"\n\n[project.scripts]\nmypkg = \"mypkg.cli:main\"\n```\n", "metadata": {"tags": ["python", "cli", "pyproject"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000094", "task_type": "packaging", "difficulty": "medium", "prompt": "Create a small, production-ready Python package for task scheduling.\nRequirements: include module code, CLI entrypoint, logging, and a minimal pyproject.toml.\nConstraints: must provide clear error messages, must avoid global state, stdlib-only.\nDifficulty: medium. Avoid quadratic behavior on large inputs.", "response": "Deliverable: corrected code and verification tests.\nPackage layout:\n- mypkg/core.py\n- mypkg/cli.py\n- pyproject.toml\n\n```python\n# mypkg/core.py\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import Iterable\nimport logging\n\nlog = logging.getLogger(__name__)\n\n@dataclass(frozen=True, slots=True)\nclass Result:\n    total: int\n    count: int\n\ndef aggregate(values: Iterable[int]) -> Result:\n    total = 0\n    count = 0\n    for v in values:\n        if not isinstance(v, int):\n            raise TypeError(\"values must be ints\")\n        total += v\n        count += 1\n    log.debug(\"aggregate count=%s total=%s\", count, total)\n    return Result(total, count)\n```\n\n```python\n# mypkg/cli.py\nfrom __future__ import annotations\nimport argparse\nimport logging\nimport sys\nfrom .core import aggregate\n\ndef main(argv: list[str] | None = None) -> int:\n    p = argparse.ArgumentParser(prog=\"mypkg\")\n    p.add_argument(\"--log-level\", default=\"WARNING\")\n    args = p.parse_args(argv)\n    logging.basicConfig(level=getattr(logging, args.log_level.upper(), logging.WARNING))\n    data: list[int] = []\n    for line in sys.stdin:\n        line = line.strip()\n        if line:\n            data.append(int(line))\n    r = aggregate(data)\n    print(f\"count={r.count} total={r.total}\")\n    return 0\n```\n\n```toml\n# pyproject.toml\n[project]\nname = \"mypkg\"\nversion = \"0.1.0\"\nrequires-python = \">=3.10\"\n\n[project.scripts]\nmypkg = \"mypkg.cli:main\"\n```\n", "metadata": {"tags": ["python", "cli", "pyproject"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000095", "task_type": "security_safe", "difficulty": "expert", "prompt": "Write secure, defensive Python code for parsing.\nRequirements: input validation, safe file handling, prevent path traversal, include tests.\nConstraints: must be easy to extend, must include type hints, must provide clear error messages.\nDifficulty: expert. Provide helpful exceptions with actionable messages.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "security", "validation"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000096", "task_type": "security_safe", "difficulty": "easy", "prompt": "Write secure, defensive Python code for task scheduling.\nRequirements: input validation, safe file handling, prevent path traversal, include tests.\nConstraints: must include docstrings, must handle Windows paths safely, stdlib-only.\nDifficulty: easy. Guarantee deterministic output ordering.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "security", "validation"], "has_code": true, "requires_thinking": false, "seed": 1337}}
{"id": "pygod_000097", "task_type": "security_safe", "difficulty": "expert", "prompt": "Write secure, defensive Python code for metrics aggregation.\nRequirements: input validation, safe file handling, prevent path traversal, include tests.\nConstraints: must include type hints, must provide clear error messages, must include docstrings, must be streaming-friendly.\nDifficulty: expert. Guarantee deterministic output ordering.", "response": "Deliverable: API + reference implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "security", "validation"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000098", "task_type": "security_safe", "difficulty": "expert", "prompt": "Write secure, defensive Python code for report generation.\nRequirements: input validation, safe file handling, prevent path traversal, include tests.\nConstraints: must be memory-efficient, must handle Windows paths safely, must be streaming-friendly.\nDifficulty: expert. Include a small CLI example if relevant.", "response": "Deliverable: implementation + tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "security", "validation"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000099", "task_type": "security_safe", "difficulty": "hard", "prompt": "Write secure, defensive Python code for task scheduling.\nRequirements: input validation, safe file handling, prevent path traversal, include tests.\nConstraints: must provide clear error messages, stdlib-only.\nDifficulty: hard. Handle empty inputs and malformed records gracefully.", "response": "Deliverable: production-ready reference solution.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "security", "validation"], "has_code": true, "requires_thinking": true, "seed": 1337}}
{"id": "pygod_000100", "task_type": "bugfix", "difficulty": "easy", "prompt": "Fix a subtle bug in a Python module used for data modeling.\nRequirements: explain root cause briefly, provide corrected code, and add regression tests.\nConstraints: must provide clear error messages, must be streaming-friendly.\nDifficulty: easy. Support both Windows and POSIX paths correctly.", "response": "Deliverable: corrected code and verification tests.\n```python\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable\nimport json\n\n\nclass ValidationError(ValueError):\n    \"\"\"Raised when input validation fails.\"\"\"\n\n\ndef safe_join(base: Path, *parts: str) -> Path:\n    \"\"\"Join paths while preventing traversal outside base.\"\"\"\n    b = base.resolve()\n    p = (b / Path(*parts)).resolve()\n    if p != b and b not in p.parents:\n        raise ValidationError(f\"unsafe path: {p}\")\n    return p\n\n\n@dataclass(frozen=True, slots=True)\nclass Record:\n    key: str\n    value: int\n\n\ndef parse_jsonl(lines: Iterable[str]) -> list[Record]:\n    out: list[Record] = []\n    for idx, line in enumerate(lines, 1):\n        s = line.strip()\n        if not s:\n            continue\n        try:\n            obj = json.loads(s)\n        except json.JSONDecodeError as e:\n            raise ValidationError(f\"line {idx}: invalid json\") from e\n        if not isinstance(obj, dict):\n            raise ValidationError(f\"line {idx}: expected object\")\n        key = obj.get(\"key\")\n        val = obj.get(\"value\")\n        if not isinstance(key, str) or not key:\n            raise ValidationError(f\"line {idx}: key must be non-empty str\")\n        if not isinstance(val, int):\n            raise ValidationError(f\"line {idx}: value must be int\")\n        out.append(Record(key, val))\n    return out\n\n\ndef write_jsonl(base: Path, rel: str, records: Iterable[Record]) -> Path:\n    base.mkdir(parents=True, exist_ok=True)\n    dst = safe_join(base, rel)\n    dst.parent.mkdir(parents=True, exist_ok=True)\n    with dst.open(\"w\", encoding=\"utf-8\", newline=\"\\n\") as f:\n        for r in records:\n            f.write(json.dumps({\"key\": r.key, \"value\": r.value}, ensure_ascii=False) + \"\\n\")\n    return dst\n```\n\n```python\n# tests (pytest-style)\nimport json\nfrom pathlib import Path\nimport pytest\nfrom module_under_test import ValidationError, Record, parse_jsonl, write_jsonl\n\ndef test_parse_ok():\n    lines = [json.dumps({\"key\":\"a\",\"value\":1}), json.dumps({\"key\":\"b\",\"value\":2})]\n    assert parse_jsonl(lines) == [Record(\"a\",1), Record(\"b\",2)]\n\ndef test_parse_rejects_bad_json():\n    with pytest.raises(ValidationError):\n        parse_jsonl([\"{not json}\"])\n\ndef test_safe_join_blocks_traversal(tmp_path: Path):\n    with pytest.raises(ValidationError):\n        write_jsonl(tmp_path, \"../x.jsonl\", [Record(\"a\",1)])\n```\n", "metadata": {"tags": ["python", "debugging", "tests"], "has_code": true, "requires_thinking": false, "seed": 1337}}