YanBoChen committed
Commit · c0317b2 · 1 Parent(s): f3ac7d9
WIP: Enhance dual keyword chunking to include pre-calculated metadata for treatment chunks
src/data_processing.py +61 -46
src/data_processing.py CHANGED
@@ -231,74 +231,89 @@ class DataProcessor:
         return chunks
 
     def create_dual_keyword_chunks(self, text: str, emergency_keywords: str,
-                                   treatment_keywords: str, chunk_size: int =
                                    doc_id: str = None) -> List[Dict[str, Any]]:
         """
         Create chunks for treatment data with both emergency and treatment keywords
 
         Args:
             text: Input text
-            emergency_keywords: Emergency keywords
-            treatment_keywords: Treatment keywords
-            chunk_size: Size of each chunk
             doc_id: Document ID for tracking
 
         Returns:
-            List of chunk dictionaries
         """
         if not treatment_keywords or pd.isna(treatment_keywords):
             return []
 
         chunks = []
-
-        tr_keywords = treatment_keywords.split("|") if treatment_keywords else []
 
-        #
-
-            closest_distance = float('inf')
 
-
-            if
-
-                if distance < closest_distance and distance < chunk_size:
-                    closest_distance = distance
-                    closest_em_keyword = em_keyword
 
-        #
-
-                center = (tr_pos + em_pos) // 2
-            else:
-                # Center on treatment keyword
-                center = tr_pos
 
-
 
-            }
-            chunks.append(chunk_info)
 
         return chunks
 
     def process_emergency_chunks(self) -> List[Dict[str, Any]]:
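Only fragments of the removed implementation survive above (closest_distance, the distance < chunk_size check, center = (tr_pos + em_pos) // 2). They suggest the old code paired each treatment keyword with its nearest emergency keyword and centered a single chunk between the two positions, falling back to the treatment keyword alone when no emergency keyword was close enough. The following is a reconstruction sketch, not the original code: the function name _old_dual_keyword_chunks, the loop structure, the em_pos/tr_pos bookkeeping, and the chunk_info field names are assumptions filled in around the surviving lines.

from typing import Any, Dict, List

def _old_dual_keyword_chunks(text: str, emergency_keywords: str,
                             treatment_keywords: str, chunk_size: int = 512) -> List[Dict[str, Any]]:
    """Reconstruction sketch of the removed pairing logic (hypothetical, not the original code)."""
    chunks: List[Dict[str, Any]] = []
    em_keywords = emergency_keywords.split("|") if emergency_keywords else []
    tr_keywords = treatment_keywords.split("|") if treatment_keywords else []
    text_lower = text.lower()

    for tr_keyword in tr_keywords:
        tr_pos = text_lower.find(tr_keyword.lower())
        if tr_pos == -1:
            continue

        # Surviving fragment: "if distance < closest_distance and distance < chunk_size"
        closest_em_keyword, closest_distance, em_pos = None, float('inf'), -1
        for em_keyword in em_keywords:
            pos = text_lower.find(em_keyword.lower())
            if pos == -1:
                continue
            distance = abs(pos - tr_pos)
            if distance < closest_distance and distance < chunk_size:
                closest_distance = distance
                closest_em_keyword = em_keyword
                em_pos = pos

        # Surviving fragments: "center = (tr_pos + em_pos) // 2" and "# Center on treatment keyword"
        if closest_em_keyword is not None:
            center = (tr_pos + em_pos) // 2
        else:
            # Center on treatment keyword
            center = tr_pos

        start = max(0, center - chunk_size // 2)
        end = min(len(text), center + chunk_size // 2)
        chunk_info = {
            'text': text[start:end],                         # field names are guesses
            'treatment_keyword': tr_keyword,
            'closest_emergency_keyword': closest_em_keyword,
        }
        chunks.append(chunk_info)

    return chunks

The version of the function added by this commit follows.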
         return chunks
 
     def create_dual_keyword_chunks(self, text: str, emergency_keywords: str,
+                                   treatment_keywords: str, chunk_size: int = None,
                                    doc_id: str = None) -> List[Dict[str, Any]]:
         """
         Create chunks for treatment data with both emergency and treatment keywords
+        Uses a token-based separate-chunking strategy and adds pre-calculated metadata to treatment chunks
 
         Args:
             text: Input text
+            emergency_keywords: Emergency keywords (pipe-separated)
+            treatment_keywords: Treatment keywords (pipe-separated)
+            chunk_size: Size of each chunk in tokens (defaults to self.chunk_size)
             doc_id: Document ID for tracking
 
         Returns:
+            List of chunk dictionaries with enhanced metadata for treatment chunks
         """
         if not treatment_keywords or pd.isna(treatment_keywords):
             return []
 
         chunks = []
+        chunk_size = chunk_size or self.chunk_size
 
+        # Parse keywords
+        em_kws = emergency_keywords.split('|') if emergency_keywords else []
+        tr_kws = treatment_keywords.split('|') if treatment_keywords else []
+
+        # Step 1: Process emergency keywords (keep the original chunk format)
+        if emergency_keywords:
+            em_chunks = self.create_keyword_centered_chunks(
+                text, emergency_keywords, chunk_size, doc_id
+            )
+            # Mark as emergency chunks; keep the original metadata format
+            for chunk in em_chunks:
+                chunk['source_type'] = 'emergency'
+            chunks.extend(em_chunks)
+
+        # Step 2: Process treatment keywords (add new metadata)
+        if treatment_keywords:
+            tr_chunks = self.create_keyword_centered_chunks(
+                text, treatment_keywords, chunk_size, doc_id
+            )
 
+            # Add pre-calculated metadata to each treatment chunk
+            for i, chunk in enumerate(tr_chunks):
+                chunk_text = chunk['text'].lower()
 
+                # Emergency keywords contained in the chunk text
+                contains_emergency_kws = [
+                    kw for kw in em_kws if kw.lower() in chunk_text
+                ]
 
+                # Treatment keywords contained in the chunk text
+                contains_treatment_kws = [
+                    kw for kw in tr_kws if kw.lower() in chunk_text
+                ]
 
+                # Determine the match type
+                has_emergency = len(contains_emergency_kws) > 0
+                has_treatment = len(contains_treatment_kws) > 0
 
+                if has_emergency and has_treatment:
+                    match_type = "both"
+                elif has_emergency:
+                    match_type = "emergency_only"
+                elif has_treatment:
+                    match_type = "treatment_only"
+                else:
+                    match_type = "none"
 
+                # Add pre-calculated metadata (treatment chunks only)
+                chunk.update({
+                    'source_type': 'treatment',
+                    'contains_emergency_kws': contains_emergency_kws,
+                    'contains_treatment_kws': contains_treatment_kws,
+                    'match_type': match_type,
+                    'emergency_keywords': emergency_keywords,  # preserve the original metadata
+                    'treatment_keywords': treatment_keywords,
+                    'chunk_id': f"{doc_id}_treatment_chunk_{i}" if doc_id else f"treatment_chunk_{i}"
+                })
+
+            chunks.extend(tr_chunks)
 
+        logger.debug(f"Created {len(chunks)} dual-keyword chunks for document {doc_id or 'unknown'}")
         return chunks
 
     def process_emergency_chunks(self) -> List[Dict[str, Any]]:
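A quick usage sketch of the new method. It assumes create_keyword_centered_chunks returns dictionaries containing at least a 'text' field (which the loop above requires), that DataProcessor can be constructed without arguments, and that the module is importable as src.data_processing; all three are assumptions about code not shown in this diff.

from src.data_processing import DataProcessor  # import path inferred from the file name above

processor = DataProcessor()  # constructor arguments, if any, are not shown in this diff

document_text = (
    "Patient presented with cardiac arrest. "
    "Epinephrine was administered and defibrillation was performed."
)

chunks = processor.create_dual_keyword_chunks(
    text=document_text,
    emergency_keywords="cardiac arrest|shock",        # pipe-separated, per the docstring
    treatment_keywords="epinephrine|defibrillation",
    chunk_size=512,                                    # tokens; None falls back to self.chunk_size
    doc_id="doc_0001",
)

# Emergency chunks keep their original metadata plus source_type='emergency';
# treatment chunks carry the new pre-calculated fields.
for chunk in chunks:
    if chunk.get('source_type') == 'treatment':
        print(chunk['chunk_id'], chunk['match_type'],
              chunk['contains_emergency_kws'], chunk['contains_treatment_kws'])

Pre-computing contains_emergency_kws, contains_treatment_kws, and match_type at chunking time lets downstream retrieval filter treatment chunks (for example, match_type == "both") without re-scanning each chunk's text.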