Datasets: Add multi-threading #1
opened by anirahulpersonal

DocLayNet-base.py +53 -48
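For reference, below is a minimal sketch of the concurrent.futures pattern this patch applies; the ANN_DIR name and the process_file body are illustrative placeholders, not the script's real logic:

    from concurrent.futures import ThreadPoolExecutor, as_completed
    import json
    import os

    ANN_DIR = "annotations"  # hypothetical directory of per-page JSON files

    def process_file(name):
        # Stand-in for the real per-file work (JSON parsing + image loading).
        with open(os.path.join(ANN_DIR, name), "r", encoding="utf8") as f:
            return json.load(f)

    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        # Submit every file up front and remember which future belongs to which file.
        future_to_file = {executor.submit(process_file, n): n
                          for n in sorted(os.listdir(ANN_DIR))}
        for future in as_completed(future_to_file):  # completion order, not submission order
            name = future_to_file[future]
            try:
                result = future.result()  # re-raises any exception from the worker thread
            except Exception as exc:
                print(f"{name} failed: {exc}")

Note that as_completed yields futures in completion order rather than submission order, which is why the patch keeps a future_to_file dict to map each future back to its file; it also means the guid-to-file pairing produced by enumerate can vary between runs.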
DocLayNet-base.py CHANGED

@@ -28,6 +28,7 @@ import os
 # import base64
 from PIL import Image
 import datasets
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -190,51 +191,55 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
 
 
     def _generate_examples(self, filepath, split):
-        [48 deleted lines: the previous single-threaded implementation (old lines 193-240); their content is not shown in this extract]
+        logger.info("⏳ Generating examples from = %s", filepath)
+        ann_dir = os.path.join(filepath, "annotations")
+        img_dir = os.path.join(filepath, "images")
+
+        def process_file(file):
+            texts = []
+            bboxes_block = []
+            bboxes_line = []
+            categories = []
+
+            file_path = os.path.join(ann_dir, file)
+            with open(file_path, "r", encoding="utf8") as f:
+                data = json.load(f)
+
+            image_path = os.path.join(img_dir, file.replace("json", "png"))
+            image, size = load_image(image_path)
+
+            for item in data["form"]:
+                text_example, category_example, bbox_block_example, bbox_line_example = item["text"], item["category"], item["box"], item["box_line"]
+                texts.append(text_example)
+                categories.append(category_example)
+                bboxes_block.append(bbox_block_example)
+                bboxes_line.append(bbox_line_example)
+
+            metadata = data["metadata"]
+            return {
+                "texts": texts,
+                "bboxes_block": bboxes_block,
+                "bboxes_line": bboxes_line,
+                "categories": categories,
+                "image": image,
+                "page_hash": metadata["page_hash"],
+                "original_filename": metadata["original_filename"],
+                "page_no": metadata["page_no"],
+                "num_pages": metadata["num_pages"],
+                "original_width": metadata["original_width"],
+                "original_height": metadata["original_height"],
+                "coco_width": metadata["coco_width"],
+                "coco_height": metadata["coco_height"],
+                "collection": metadata["collection"],
+                "doc_category": metadata["doc_category"]
+            }
+
+        with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
+            future_to_file = {executor.submit(process_file, file): file for file in sorted(os.listdir(ann_dir))}
+            for guid, future in enumerate(as_completed(future_to_file)):
+                file = future_to_file[future]
+                try:
+                    result = future.result()
+                    yield guid, {"id": str(guid), **result}
+                except Exception as exc:
+                    logger.error(f"Error processing {file}: {exc}")
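For context, _generate_examples is the method the datasets library drives when the loading script is built, so the change is exercised by an ordinary load. A hedged usage sketch follows; the Hub repo id is an assumption, not confirmed by this PR:

    from datasets import load_dataset

    # Hypothetical repo id; substitute the actual repo hosting DocLayNet-base.py.
    # Recent datasets versions may also require trust_remote_code=True for script-based datasets.
    dataset = load_dataset("pierreguillou/DocLayNet-base")
    print(dataset["train"][0]["doc_category"])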