leonweber committed on
Commit 1f46876 · 1 Parent(s): 2ff6ed1

Upload pervasive_imdb.py

Files changed (1)
  1. pervasive_imdb.py +144 -0
pervasive_imdb.py ADDED
@@ -0,0 +1,144 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List, Tuple, Dict
+from pathlib import Path
+import json
+import os
+import numpy as np
+
+import datasets
+
+_CITATION = """\
+@inproceedings{DBLP:conf/nips/NorthcuttAM21,
+  author    = {Curtis G. Northcutt and
+               Anish Athalye and
+               Jonas Mueller},
+  editor    = {Joaquin Vanschoren and
+               Sai{-}Kit Yeung},
+  title     = {Pervasive Label Errors in Test Sets Destabilize Machine Learning Benchmarks},
+  booktitle = {Proceedings of the Neural Information Processing Systems Track on
+               Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December
+               2021, virtual},
+  year      = {2021},
+  url       = {https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/f2217062e9a397a1dca429e7d70bc6ca-Abstract-round1.html},
+  timestamp = {Thu, 05 May 2022 16:53:59 +0200},
+  biburl    = {https://dblp.org/rec/conf/nips/NorthcuttAM21.bib},
+  bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+"""
+
+_DATASETNAME = "pervasive_imdb"
+
+_DESCRIPTION = """\
+This dataset is designed for Annotation Error Detection.
+"""
+
+_HOMEPAGE = ""
+
+_LICENSE = "GPL3"
+
+_URLS = {
+    "imdb": "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
+    "mturk": "https://raw.githubusercontent.com/cleanlab/label-errors/main/mturk/imdb_mturk.json",
+    "indexing": "https://raw.githubusercontent.com/cleanlab/label-errors/main/dataset_indexing/imdb_test_set_index_to_filename.json",
+}
+
+_SOURCE_VERSION = "1.0.0"
+
+_SCHEMA = datasets.Features({
+    "id": datasets.Value("string"),
+    "text": datasets.Value("string"),
+    "label": datasets.Value("string"),
+    "true_label": datasets.Value("string"),
+})
+
+
+class PervasiveImdb(datasets.GeneratorBasedBuilder):
+    _VERSION = datasets.Version(_SOURCE_VERSION)
+
+    def _info(self) -> datasets.DatasetInfo:
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=_SCHEMA,
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+            license=_LICENSE,
+        )
+
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        imdb_dir = dl_manager.download_and_extract(_URLS["imdb"])
+        mturk_file = dl_manager.download_and_extract(_URLS["mturk"])
+        indexing_file = dl_manager.download_and_extract(_URLS["indexing"])
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # Whatever you put in gen_kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "imdb_dir": Path(imdb_dir) / "aclImdb",
+                    "mturk_file": Path(mturk_file),
+                    "indexing_file": Path(indexing_file),
+                },
+            ),
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, imdb_dir: Path, mturk_file: Path, indexing_file: Path) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        walk_order = {}
+        # We don't deal with train set indices, so any order is fine for the train set.
+        walk_order['train'] = [d + z for d in ["neg/", "pos/"]
+                               for z in os.listdir(imdb_dir / 'train' / d)]
+        # Test set walk order needs to match our order to map errors correctly.
+        with open(indexing_file, 'r') as rf:
+            walk_order['test'] = json.load(rf)
+
+        # This text dict stores the text data with keys ['train', 'test'];
+        # only the test set is yielded below.
+        text = {}
+        # Read in text data for IMDB
+        for dataset in ['train', 'test']:
+            text[dataset] = []
+            dataset_dir = imdb_dir / dataset
+            for i, fn in enumerate(walk_order[dataset]):
+                with open(dataset_dir / fn, 'r') as rf:
+                    text[dataset].append(rf.read())
+
+        idx_to_mturk = {}
+
+        with open(mturk_file) as f:
+            mturk_data = json.load(f)
+            for datapoint in mturk_data:
+                idx = walk_order['test'].index(datapoint['id'].removeprefix('test/') + ".txt")
+                idx_to_mturk[idx] = datapoint["mturk"]
+
+        # The given labels for both train and test set are the same:
+        # 12,500 negative (0) reviews followed by 12,500 positive (1) reviews.
+        labels = np.concatenate([np.zeros(12500), np.ones(12500)]).astype(int)
+
+        for i in range(25000):
+            # Flip the label when the example was reviewed on MTurk and fewer
+            # than 3 raters voted for the originally given label.
+            if i in idx_to_mturk and idx_to_mturk[i]["given"] < 3:
+                true_label = not bool(labels[i])
+            else:
+                true_label = bool(labels[i])
+            yield (i, {
+                "id": str(i),
+                "text": text["test"][i],
+                "label": bool(labels[i]),
+                "true_label": true_label,
+            })
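
For reference, a minimal usage sketch (an illustration, not part of the uploaded file), assuming the script above is saved locally as pervasive_imdb.py. The single "train" split the builder defines contains the 25,000 IMDB test reviews; since label and true_label are both declared as strings in the schema, rows where the two fields disagree are the suspected annotation errors.

from datasets import load_dataset

# Load from the local script; recent versions of `datasets` may additionally
# require trust_remote_code=True for script-based datasets.
ds = load_dataset("pervasive_imdb.py", split="train")

# Rows where the crowdsourced label disagrees with the originally given one
# are the suspected annotation errors.
errors = ds.filter(lambda ex: ex["label"] != ex["true_label"])
print(f"{len(errors)} of {len(ds)} reviews are suspected label errors")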