changed chunk generation
ref_seg_ger.py (+3 -3)
@@ -138,7 +138,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     # ]

     # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    TOKENIZER =
+    TOKENIZER = Whitespace()

     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -217,8 +217,8 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         for i, row in df.iterrows():

             #tokenized_input = row['token'].split(' ')
-            print(
-            tokenized_input, offsets = zip(*
+            print(self.TOKENIZER.pre_tokenize(row['token']))
+            tokenized_input, offsets = zip(*self.TOKENIZER.pre_tokenize(row['token']))
             print(tokenized_input)
             if f.endswith('Cermaine_0.xml.csv'):
                 print(tokenized_input)