fixed bug
ref_seg_ger.py CHANGED (+7 -6)
@@ -14,7 +14,7 @@
 # TODO: Address all TODOs and remove all explanatory comments
 """TODO: Add a description here."""
 
-import
+from glob import glob
 import os
 import numpy as np
 from PIL import Image
@@ -49,10 +49,10 @@ _LICENSE = ""
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS =
-    "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_train.zip",
-    "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_test.zip",
-
+_URLS = {
+    'train': "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_train.zip",
+    'test': "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_test.zip",
+}
 
 _LABELS = [
     'publisher', 'source', 'url', 'other', 'author', 'editor', 'lpage',
@@ -203,8 +203,9 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # print(filepath)
+        paths = glob(filepath + '*.csv')
         key = 0
-        for f in
+        for f in paths:
             df = pd.read_csv(f)
             input_ids = []
             labels = []
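For orientation, here is a minimal sketch of how the patched pieces would fit together in a typical `datasets.GeneratorBasedBuilder` loading script. It follows the standard HuggingFace `datasets` template rather than the actual ref_seg_ger.py: the `_split_generators` body, the trailing path separator on `filepath` (so that `glob(filepath + '*.csv')` matches files inside the extracted directory), and the yielded example dict are all assumptions for illustration.

from glob import glob

import datasets
import pandas as pd

_URLS = {
    'train': "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_train.zip",
    'test': "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_test.zip",
}

class RefSeg(datasets.GeneratorBasedBuilder):
    # _info() and the feature schema live in the real script and are omitted here.

    def _split_generators(self, dl_manager):
        # download_and_extract on a dict returns a dict of extracted paths keyed like _URLS
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # trailing separator assumed so filepath + '*.csv' globs inside the directory
                gen_kwargs={"filepath": data_dir["train"] + "/"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir["test"] + "/"},
            ),
        ]

    def _generate_examples(self, filepath):
        key = 0
        # the loop the commit fixes: iterate over the CSV files in the extracted split directory
        for f in glob(filepath + '*.csv'):
            df = pd.read_csv(f)
            # the real script accumulates input_ids/labels per row; this yield is illustrative only
            yield key, {"file": f, "rows": len(df)}
            key += 1

Keying `_URLS` by split name is what makes this pattern work: `download_and_extract` mirrors the dict structure, so each `SplitGenerator` can pull its own extracted directory directly, which the commit's change from bare URLs to a dict enables.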