Datasets:
Size:
1M<n<10M
ArXiv:
Tags:
Document_Understanding
Document_Packet_Splitting
Document_Comprehension
Document_Classification
Document_Recognition
Document_Segmentation
DOI:
License:
File size: 4,526 Bytes
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-NC-4.0
import os
from pathlib import Path
from typing import List, Optional
import csv
from loguru import logger
import fitz
from models import Document
class PdfLoader:
"""Loads PDF files from raw data directory."""
def __init__(self, raw_data_path: str = ".data/rvl-cdip-mp/raw_data", max_file_size_mb: float = 150):
"""Initialize PDF loader.
Args:
raw_data_path: Path to raw data directory
max_file_size_mb: Maximum file size in MB for valid PDFs
"""
self.raw_data_path = Path(raw_data_path)
self.max_file_size_mb = max_file_size_mb
def validate_pdf(self, file_path: Path) -> tuple[bool, str, int]:
"""Validate if a PDF is readable and within size limits.
Args:
file_path: Path to PDF file
Returns:
Tuple of (is_valid, status_message, page_count)
"""
try:
# Check file size
size_bytes = file_path.stat().st_size
size_mb = size_bytes / (1024 * 1024)
if size_bytes == 0:
return False, "ZERO_SIZE", 0
if size_mb > self.max_file_size_mb:
return False, f"TOO_LARGE ({size_mb:.1f}MB)", 0
# Check PDF header
with open(file_path, "rb") as f:
header = f.read(4)
if header != b"%PDF":
return False, "INVALID_PDF_HEADER", 0
# Test readability with PyMuPDF
doc = fitz.open(str(file_path))
if doc.is_encrypted:
doc.close()
return False, "ENCRYPTED_PDF", 0
page_count = doc.page_count
if page_count == 0:
doc.close()
return False, "NO_PAGES", 0
# Try to access first page
page = doc[0]
page.get_text()
doc.close()
return True, "VALID", page_count
except Exception as e:
return False, f"ERROR: {str(e)[:50]}", 0
def get_all_documents(self, exclude_types: Optional[List[str]] = None) -> List[Document]:
"""Get all valid PDF documents from raw data directory.
Args:
exclude_types: List of document types to exclude (e.g., ['language'])
"""
if exclude_types is None:
exclude_types = []
documents = []
for doc_type_dir in self.raw_data_path.iterdir():
if not doc_type_dir.is_dir():
continue
doc_type = doc_type_dir.name
if doc_type in exclude_types:
logger.info(f"Skipping excluded type: {doc_type}")
continue
for pdf_file in doc_type_dir.glob("*.pdf"):
is_valid, status, page_count = self.validate_pdf(pdf_file)
if not is_valid:
logger.warning(f"Skipping invalid PDF {pdf_file}: {status}")
continue
doc = Document(
doc_type=doc_type,
doc_name=pdf_file.stem,
filename=pdf_file.name,
absolute_filepath=str(pdf_file.absolute()),
page_count=page_count
)
documents.append(doc)
logger.info(f"Loaded {len(documents)} valid documents")
return documents
def save_document_mapping(self, documents: List[Document], output_path: str):
"""Save document mapping to CSV."""
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, 'w', newline='') as f:
writer = csv.DictWriter(f, fieldnames=['type', 'doc_name', 'filename', 'pages', 'validation_status'])
writer.writeheader()
for doc in documents:
writer.writerow({
'type': doc.doc_type,
'doc_name': doc.doc_name,
'filename': doc.filename,
'pages': doc.page_count,
'validation_status': 'VALID'
})
logger.info(f"Saved document mapping to {output_path}")