changed tokenization
ref_seg_ger.py (+5 -10)
@@ -142,15 +142,10 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     # ]

     # DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
-
-
-
-
-        Split(":", behavior="isolated"),
-        Split("/", behavior="isolated"),
-        Split("-", behavior="isolated"),
-        Split(",", behavior="isolated"),
-    ])
+
+    #split_tokens = [".", ":", ",",";","/","-","(", ")"]
+
+    TOKENIZER = Whitespace()

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
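The first hunk drops the punctuation Split pre-tokenizers and keeps only Whitespace(). A minimal sketch of how the two behave in the HuggingFace tokenizers library; the sample string and the Sequence wrapper are assumptions (the removed ")]" suggests a list argument, but its opening line falls outside this hunk), not part of the commit:

# Sketch only: contrasts the removed Split-based pre-tokenization with the
# new Whitespace() one. Sample text and variable names are illustrative.
from tokenizers.pre_tokenizers import Sequence, Split, Whitespace

reference = "Meyer, K.: Beispieltext, S. 12-14"

# Roughly what the removed lines built (assumed Sequence wrapper): each
# listed punctuation mark becomes its own token ("isolated").
split_based = Sequence([
    Split(":", behavior="isolated"),
    Split("/", behavior="isolated"),
    Split("-", behavior="isolated"),
    Split(",", behavior="isolated"),
])
print(split_based.pre_tokenize_str(reference))

# The replacement: Whitespace() splits on the pattern \w+|[^\w\s]+, i.e. on
# whitespace, with runs of punctuation kept as separate tokens.
whitespace_only = Whitespace()
print(whitespace_only.pre_tokenize_str(reference))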
@@ -285,7 +280,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         split_ids = np.array_split(clean_input_ids, n_chunks)
         split_labels = np.array_split(clean_labels, n_chunks)
         split_refs = np.array_split(clean_refs, n_chunks)
-        for chunk_ids, chunk_labels, chunk_refs in zip(
+        for chunk_ids, chunk_labels, chunk_refs in zip(clean_input_ids, clean_labels, clean_refs):
         # for chunk_id, index in enumerate(range(0, len(clean_input_ids), self.CHUNK_SIZE)):
         # split_ids = clean_input_ids[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
         # split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
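In the second hunk, np.array_split cuts each of the three parallel sequences into n_chunks near-equal pieces, and zip walks the pieces in lockstep. Note that the committed line zips the clean_* sequences themselves rather than the freshly computed split_* arrays, so it iterates element-wise, not chunk-wise. A toy sketch of the chunk-wise pattern, assuming the split_* arrays are the intended iterables; all data here is made up:

# Sketch only: chunk three parallel sequences with np.array_split and
# iterate the chunks together with zip.
import numpy as np

clean_input_ids = list(range(7))
clean_labels = ["B", "I", "I", "O", "B", "I", "O"]
clean_refs = [0, 0, 0, 0, 1, 1, 1]
n_chunks = 3

# array_split tolerates uneven division: 7 elements over 3 chunks
# yields chunk sizes 3, 2, 2 (np.split would raise instead).
split_ids = np.array_split(clean_input_ids, n_chunks)
split_labels = np.array_split(clean_labels, n_chunks)
split_refs = np.array_split(clean_refs, n_chunks)

for chunk_ids, chunk_labels, chunk_refs in zip(split_ids, split_labels, split_refs):
    print(chunk_ids.tolist(), chunk_labels.tolist(), chunk_refs.tolist())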