| | |
| | """ |
| | Document Segmentation MCP Server |
| | |
| | This MCP server provides intelligent document segmentation and retrieval functions for handling |
| | large research papers and technical documents that exceed LLM token limits. |
| | |
| | ==== CORE FUNCTIONALITY ==== |
| | 1. Analyze document structure and type using semantic content analysis |
| | 2. Create intelligent segments based on content semantics, not just structure |
| | 3. Provide query-aware segment retrieval with relevance scoring |
| | 4. Support both structured (papers with headers) and unstructured documents |
| | 5. Configurable segmentation strategies based on document complexity |
| | |
| | ==== MCP TOOLS PROVIDED ==== |
| | |
| | analyze_and_segment_document(paper_dir: str, force_refresh: bool = False) |
| | Purpose: Analyzes document structure and creates intelligent segments |
| | - Detects document type (research paper, technical doc, algorithm-focused, etc.) |
| | - Selects optimal segmentation strategy based on content analysis |
| | - Creates semantic segments preserving algorithm and concept integrity |
| | - Stores segmentation index for efficient retrieval |
| | - Returns: JSON with segmentation status, strategy used, and segment count |
| | |
| | read_document_segments(paper_dir: str, query_type: str, keywords: List[str] = None, |
| | max_segments: int = 3, max_total_chars: int = None) |
| | Purpose: Intelligently retrieves relevant document segments based on query context |
| | - query_type: "concept_analysis", "algorithm_extraction", or "code_planning" |
| | - Uses semantic relevance scoring to rank segments |
| | - Applies query-specific filtering and keyword matching |
| | - Dynamically calculates optimal character limits based on content complexity |
| | - Returns: JSON with selected segments optimized for the specific query type |
| | |
| | get_document_overview(paper_dir: str) |
| | Purpose: Provides high-level overview of document structure and available segments |
| | - Shows document type and segmentation strategy used |
| | - Lists all segments with titles, content types, and relevance scores |
| | - Displays segment statistics (character counts, keyword summaries) |
| | - Returns: JSON with complete document analysis metadata |
| | |
| | ==== SEGMENTATION STRATEGIES ==== |
| | - semantic_research_focused: For academic papers with complex algorithmic content |
| | - algorithm_preserve_integrity: Maintains algorithm blocks and formula chains intact |
| | - concept_implementation_hybrid: Merges related concepts with implementation details |
| | - semantic_chunking_enhanced: Advanced boundary detection for long documents |
| | - content_aware_segmentation: Adaptive chunking based on content density |
| | |
| | ==== INTELLIGENT FEATURES ==== |
| | - Semantic boundary detection (not just structural) |
| | - Algorithm block identification and preservation |
| | - Formula chain recognition and grouping |
| | - Concept-implementation relationship mapping |
| | - Multi-level relevance scoring (content type, importance, keyword matching) |
| | - Backward compatibility with existing document indexes |
| | - Configurable via mcp_agent.config.yaml (enabled/disabled, size thresholds) |
| | |
| | Usage: |
| | python tools/document_segmentation_server.py |
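| | |
| | Example (illustrative sketch only; the exact invocation depends on the MCP client |
| | connected to this server, and the paths and keywords shown here are hypothetical): |
| | |
| | analyze_and_segment_document(paper_dir="papers/example_paper") |
| | -> {"status": "success", "segmentation_strategy": "...", "total_segments": ..., ...} |
| | read_document_segments(paper_dir="papers/example_paper", |
| | query_type="algorithm_extraction", keywords=["attention"]) |
| | -> {"status": "success", "segments": [...], "total_chars": ..., ...} |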
| | """ |
| |
|
| | import os |
| | import re |
| | import json |
| | import sys |
| | import io |
| | from typing import Dict, List, Tuple |
| | import hashlib |
| | import logging |
| | from datetime import datetime |
| | from dataclasses import dataclass, asdict |
| |
|
| | |
| | if (sys.stdout.encoding or "").lower() != "utf-8": |
| | try: |
| | if hasattr(sys.stdout, "reconfigure"): |
| | sys.stdout.reconfigure(encoding="utf-8") |
| | sys.stderr.reconfigure(encoding="utf-8") |
| | else: |
| | sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding="utf-8") |
| | sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding="utf-8") |
| | except Exception as e: |
| | print(f"Warning: Could not set UTF-8 encoding: {e}") |
| |
|
| | |
| | from mcp.server.fastmcp import FastMCP |
| |
|
| | |
| | logging.basicConfig(level=logging.INFO) |
| | logger = logging.getLogger(__name__) |
| |
|
| | |
| | mcp = FastMCP("document-segmentation-server") |
| |
|
| |
|
| | @dataclass |
| | class DocumentSegment: |
| | """Represents a document segment with metadata""" |
| |
|
| | id: str |
| | title: str |
| | content: str |
| | content_type: str |
| | keywords: List[str] |
| | char_start: int |
| | char_end: int |
| | char_count: int |
| | relevance_scores: Dict[str, float] |
| | section_path: str |
| |
|
| |
|
| | @dataclass |
| | class DocumentIndex: |
| | """Document index containing all segments and metadata""" |
| |
|
| | document_path: str |
| | document_type: str |
| | segmentation_strategy: str |
| | total_segments: int |
| | total_chars: int |
| | segments: List[DocumentSegment] |
| | created_at: str |
| |
|
| |
|
| | class DocumentAnalyzer: |
| | """Enhanced document analyzer using semantic content analysis instead of mechanical structure detection""" |
| |
|
| | |
| | ALGORITHM_INDICATORS = { |
| | "high": [ |
| | "algorithm", |
| | "procedure", |
| | "method", |
| | "approach", |
| | "technique", |
| | "framework", |
| | ], |
| | "medium": ["step", "process", "implementation", "computation", "calculation"], |
| | "low": ["example", "illustration", "demonstration"], |
| | } |
| |
|
| | TECHNICAL_CONCEPT_INDICATORS = { |
| | "high": ["formula", "equation", "theorem", "lemma", "proof", "definition"], |
| | "medium": ["parameter", "variable", "function", "model", "architecture"], |
| | "low": ["notation", "symbol", "term"], |
| | } |
| |
|
| | IMPLEMENTATION_INDICATORS = { |
| | "high": ["code", "implementation", "programming", "software", "system"], |
| | "medium": ["design", "structure", "module", "component", "interface"], |
| | "low": ["tool", "library", "package"], |
| | } |
| |
|
| | |
| | RESEARCH_PAPER_PATTERNS = [ |
| | r"(?i)\babstract\b.*?\n.*?(introduction|motivation|background)", |
| | r"(?i)(methodology|method).*?(experiment|evaluation|result)", |
| | r"(?i)(conclusion|future work|limitation).*?(reference|bibliography)", |
| | r"(?i)(related work|literature review|prior art)", |
| | ] |
| |
|
| | TECHNICAL_DOC_PATTERNS = [ |
| | r"(?i)(getting started|installation|setup).*?(usage|example)", |
| | r"(?i)(api|interface|specification).*?(parameter|endpoint)", |
| | r"(?i)(tutorial|guide|walkthrough).*?(step|instruction)", |
| | r"(?i)(troubleshooting|faq|common issues)", |
| | ] |
| |
|
| | def analyze_document_type(self, content: str) -> Tuple[str, float]: |
| | """ |
| | Enhanced document type analysis based on semantic content patterns |
| | |
| | Returns: |
| | Tuple[str, float]: (document_type, confidence_score) |
| | """ |
| | content_lower = content.lower() |
| |
|
| | |
| | algorithm_score = self._calculate_weighted_score( |
| | content_lower, self.ALGORITHM_INDICATORS |
| | ) |
| | concept_score = self._calculate_weighted_score( |
| | content_lower, self.TECHNICAL_CONCEPT_INDICATORS |
| | ) |
| | implementation_score = self._calculate_weighted_score( |
| | content_lower, self.IMPLEMENTATION_INDICATORS |
| | ) |
| |
|
| | |
| | research_pattern_score = self._detect_pattern_score( |
| | content, self.RESEARCH_PAPER_PATTERNS |
| | ) |
| | technical_pattern_score = self._detect_pattern_score( |
| | content, self.TECHNICAL_DOC_PATTERNS |
| | ) |
| |
|
| | |
| | total_research_score = ( |
| | algorithm_score + concept_score + research_pattern_score * 2 |
| | ) |
| | total_technical_score = implementation_score + technical_pattern_score * 2 |
| |
|
| | |
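| | # Heuristic decision order (tuning assumption): strong research-paper section patterns win first, |
| | # then algorithm/concept keyword density, then technical-doc and implementation signals, |
| | # falling back to "general_document" with low confidence. |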
| | if research_pattern_score > 0.5 and total_research_score > 3.0: |
| | return "research_paper", min(0.95, 0.6 + research_pattern_score * 0.35) |
| | elif algorithm_score > 2.0 and concept_score > 1.5: |
| | return "algorithm_focused", 0.85 |
| | elif total_technical_score > 2.5: |
| | return "technical_doc", 0.8 |
| | elif implementation_score > 1.5: |
| | return "implementation_guide", 0.75 |
| | else: |
| | return "general_document", 0.5 |
| |
|
| | def _calculate_weighted_score( |
| | self, content: str, indicators: Dict[str, List[str]] |
| | ) -> float: |
| | """Calculate weighted semantic indicator scores""" |
| | score = 0.0 |
| | for weight_level, terms in indicators.items(): |
| | weight = {"high": 3.0, "medium": 2.0, "low": 1.0}[weight_level] |
| | for term in terms: |
| | if term in content: |
| | score += weight * ( |
| | content.count(term) * 0.5 + 1 |
| | ) |
| | return score |
| |
|
| | def _detect_pattern_score(self, content: str, patterns: List[str]) -> float: |
| | """Detect semantic pattern matching scores""" |
| | matches = 0 |
| | for pattern in patterns: |
| | if re.search(pattern, content, re.DOTALL): |
| | matches += 1 |
| | return matches / len(patterns) |
| |
|
| | def detect_segmentation_strategy(self, content: str, doc_type: str) -> str: |
| | """ |
| | Intelligently determine the best segmentation strategy based on content semantics rather than mechanical structure |
| | """ |
| | |
| | algorithm_density = self._calculate_algorithm_density(content) |
| | concept_complexity = self._calculate_concept_complexity(content) |
| | implementation_detail_level = self._calculate_implementation_detail_level( |
| | content |
| | ) |
| |
|
| | |
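| | # Strategy selection: research papers with enough algorithmic content get the research-focused |
| | # splitter; heavily algorithmic text preserves block integrity; mixed concept/implementation |
| | # content is merged; very long documents (>15k chars) use enhanced semantic chunking; |
| | # everything else falls back to content-aware segmentation. |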
| | if doc_type == "research_paper" and algorithm_density > 0.3: |
| | return "semantic_research_focused" |
| | elif doc_type == "algorithm_focused" or algorithm_density > 0.5: |
| | return "algorithm_preserve_integrity" |
| | elif concept_complexity > 0.4 and implementation_detail_level > 0.3: |
| | return "concept_implementation_hybrid" |
| | elif len(content) > 15000: |
| | return "semantic_chunking_enhanced" |
| | else: |
| | return "content_aware_segmentation" |
| |
|
| | def _calculate_algorithm_density(self, content: str) -> float: |
| | """Calculate algorithm content density""" |
| | total_chars = len(content) |
| | algorithm_chars = 0 |
| |
|
| | |
| | algorithm_patterns = [ |
| | r"(?i)(algorithm\s+\d+|procedure\s+\d+)", |
| | r"(?i)(step\s+\d+|phase\s+\d+)", |
| | r"(?i)(input:|output:|return:|initialize:)", |
| | r"(?i)(for\s+each|while|if.*then|else)", |
| | r"(?i)(function|method|procedure).*\(", |
| | ] |
| |
|
| | for pattern in algorithm_patterns: |
| | matches = re.finditer(pattern, content) |
| | for match in matches: |
| | |
| | start = max(0, match.start() - 200) |
| | end = min(len(content), match.end() + 800) |
| | algorithm_chars += end - start |
| |
|
| | return min(1.0, algorithm_chars / total_chars) |
| |
|
| | def _calculate_concept_complexity(self, content: str) -> float: |
| | """Calculate concept complexity""" |
| | concept_indicators = self.TECHNICAL_CONCEPT_INDICATORS |
| | complexity_score = 0.0 |
| |
|
| | for level, terms in concept_indicators.items(): |
| | weight = {"high": 3.0, "medium": 2.0, "low": 1.0}[level] |
| | for term in terms: |
| | complexity_score += content.lower().count(term) * weight |
| |
|
| | |
| | return min(1.0, complexity_score / 100) |
| |
|
| | def _calculate_implementation_detail_level(self, content: str) -> float: |
| | """Calculate implementation detail level""" |
| | implementation_patterns = [ |
| | r"(?i)(code|implementation|programming)", |
| | r"(?i)(class|function|method|variable)", |
| | r"(?i)(import|include|library)", |
| | r"(?i)(parameter|argument|return)", |
| | r"(?i)(example|demo|tutorial)", |
| | ] |
| |
|
| | detail_score = 0 |
| | for pattern in implementation_patterns: |
| | detail_score += len(re.findall(pattern, content)) |
| |
|
| | return min(1.0, detail_score / 50) |
| |
|
| |
|
| | class DocumentSegmenter: |
| | """Creates intelligent segments from documents""" |
| |
|
| | def __init__(self): |
| | self.analyzer = DocumentAnalyzer() |
| |
|
| | def segment_document(self, content: str, strategy: str) -> List[DocumentSegment]: |
| | """ |
| | Perform intelligent segmentation using the specified strategy |
| | """ |
| | if strategy == "semantic_research_focused": |
| | return self._segment_research_paper_semantically(content) |
| | elif strategy == "algorithm_preserve_integrity": |
| | return self._segment_preserve_algorithm_integrity(content) |
| | elif strategy == "concept_implementation_hybrid": |
| | return self._segment_concept_implementation_hybrid(content) |
| | elif strategy == "semantic_chunking_enhanced": |
| | return self._segment_by_enhanced_semantic_chunks(content) |
| | elif strategy == "content_aware_segmentation": |
| | return self._segment_content_aware(content) |
| | else: |
| | |
| | return self._segment_by_enhanced_semantic_chunks(content) |
| |
|
| | def _segment_by_headers(self, content: str) -> List[DocumentSegment]: |
| | """Segment document based on markdown headers""" |
| | segments = [] |
| | lines = content.split("\n") |
| | current_segment = [] |
| | current_header = None |
| | char_pos = 0 |
| |
|
| | for line in lines: |
| | line_with_newline = line + "\n" |
| |
|
| | |
| | header_match = re.match(r"^(#{1,6})\s+(.+)$", line) |
| |
|
| | if header_match: |
| | |
| | if current_segment and current_header: |
| | segment_content = "\n".join(current_segment).strip() |
| | if segment_content: |
| | |
| | content_type = self._classify_content_type( |
| | current_header, segment_content |
| | ) |
| | importance_score = ( |
| | 0.8 if content_type in ["algorithm", "formula"] else 0.7 |
| | ) |
| |
|
| | segment = self._create_enhanced_segment( |
| | segment_content, |
| | current_header, |
| | char_pos - len(segment_content.encode("utf-8")), |
| | char_pos, |
| | importance_score, |
| | content_type, |
| | ) |
| | segments.append(segment) |
| |
|
| | |
| | current_header = header_match.group(2).strip() |
| | current_segment = [line] |
| | else: |
| | if current_segment is not None: |
| | current_segment.append(line) |
| |
|
| | char_pos += len(line_with_newline.encode("utf-8")) |
| |
|
| | |
| | if current_segment and current_header: |
| | segment_content = "\n".join(current_segment).strip() |
| | if segment_content: |
| | |
| | content_type = self._classify_content_type( |
| | current_header, segment_content |
| | ) |
| | importance_score = ( |
| | 0.8 if content_type in ["algorithm", "formula"] else 0.7 |
| | ) |
| |
|
| | segment = self._create_enhanced_segment( |
| | segment_content, |
| | current_header, |
| | char_pos - len(segment_content.encode("utf-8")), |
| | char_pos, |
| | importance_score, |
| | content_type, |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_preserve_algorithm_integrity( |
| | self, content: str |
| | ) -> List[DocumentSegment]: |
| | """Smart segmentation strategy that preserves algorithm integrity""" |
| | segments = [] |
| |
|
| | |
| | algorithm_blocks = self._identify_algorithm_blocks(content) |
| |
|
| | |
| | concept_groups = self._identify_concept_groups(content) |
| |
|
| | |
| | formula_chains = self._identify_formula_chains(content) |
| |
|
| | |
| | content_blocks = self._merge_related_content_blocks( |
| | algorithm_blocks, concept_groups, formula_chains, content |
| | ) |
| |
|
| | |
| | for i, block in enumerate(content_blocks): |
| | segment = self._create_enhanced_segment( |
| | block["content"], |
| | block["title"], |
| | block["start_pos"], |
| | block["end_pos"], |
| | block["importance_score"], |
| | block["content_type"], |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_research_paper_semantically( |
| | self, content: str |
| | ) -> List[DocumentSegment]: |
| | """Semantic segmentation specifically for research papers""" |
| | segments = [] |
| |
|
| | |
| | paper_sections = self._identify_research_paper_sections(content) |
| |
|
| | for section in paper_sections: |
| | |
| | enhanced_content = self._enhance_section_with_context(section, content) |
| |
|
| | segment = self._create_enhanced_segment( |
| | enhanced_content["content"], |
| | enhanced_content["title"], |
| | enhanced_content["start_pos"], |
| | enhanced_content["end_pos"], |
| | enhanced_content["importance_score"], |
| | enhanced_content["content_type"], |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_concept_implementation_hybrid( |
| | self, content: str |
| | ) -> List[DocumentSegment]: |
| | """Intelligent segmentation combining concepts and implementation""" |
| | segments = [] |
| |
|
| | |
| | concept_impl_pairs = self._identify_concept_implementation_pairs(content) |
| |
|
| | for pair in concept_impl_pairs: |
| | |
| | merged_content = self._merge_concept_with_implementation(pair, content) |
| |
|
| | segment = self._create_enhanced_segment( |
| | merged_content["content"], |
| | merged_content["title"], |
| | merged_content["start_pos"], |
| | merged_content["end_pos"], |
| | merged_content["importance_score"], |
| | merged_content["content_type"], |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_by_enhanced_semantic_chunks( |
| | self, content: str |
| | ) -> List[DocumentSegment]: |
| | """Enhanced semantic chunk segmentation""" |
| | segments = [] |
| |
|
| | |
| | semantic_boundaries = self._detect_semantic_boundaries(content) |
| |
|
| | current_start = 0 |
| | for i, boundary in enumerate(semantic_boundaries): |
| | chunk_content = content[current_start : boundary["position"]] |
| |
|
| | if len(chunk_content.strip()) > 200: |
| | segment = self._create_enhanced_segment( |
| | chunk_content, |
| | boundary["suggested_title"], |
| | current_start, |
| | boundary["position"], |
| | boundary["importance_score"], |
| | boundary["content_type"], |
| | ) |
| | segments.append(segment) |
| |
|
| | current_start = boundary["position"] |
| |
|
| | |
| | if current_start < len(content): |
| | final_content = content[current_start:] |
| | if len(final_content.strip()) > 200: |
| | segment = self._create_enhanced_segment( |
| | final_content, |
| | "Final Section", |
| | current_start, |
| | len(content), |
| | 0.7, |
| | "general", |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_content_aware(self, content: str) -> List[DocumentSegment]: |
| | """Content-aware intelligent segmentation""" |
| | segments = [] |
| |
|
| | |
| | optimal_chunk_size = self._calculate_optimal_chunk_size(content) |
| |
|
| | |
| | content_chunks = self._create_content_aware_chunks(content, optimal_chunk_size) |
| |
|
| | for chunk in content_chunks: |
| | segment = self._create_enhanced_segment( |
| | chunk["content"], |
| | chunk["title"], |
| | chunk["start_pos"], |
| | chunk["end_pos"], |
| | chunk["importance_score"], |
| | chunk["content_type"], |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_academic_paper(self, content: str) -> List[DocumentSegment]: |
| | """Segment academic paper using semantic understanding""" |
| | |
| | headers = re.findall(r"^(#{1,6})\s+(.+)$", content, re.MULTILINE) |
| | if len(headers) >= 2: |
| | return self._segment_by_headers(content) |
| |
|
| | |
| | sections = self._detect_academic_sections(content) |
| | segments = [] |
| |
|
| | for section in sections: |
| | |
| | section_type = section.get("type", "general") |
| | content_type = ( |
| | section_type |
| | if section_type |
| | in ["algorithm", "formula", "introduction", "conclusion"] |
| | else "general" |
| | ) |
| | importance_score = { |
| | "algorithm": 0.95, |
| | "formula": 0.9, |
| | "introduction": 0.85, |
| | "conclusion": 0.8, |
| | }.get(content_type, 0.7) |
| |
|
| | segment = self._create_enhanced_segment( |
| | section["content"], |
| | section["title"], |
| | section["start_pos"], |
| | section["end_pos"], |
| | importance_score, |
| | content_type, |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _detect_academic_sections(self, content: str) -> List[Dict]: |
| | """Detect academic paper sections even without clear headers""" |
| | sections = [] |
| |
|
| | |
| | section_patterns = [ |
| | (r"(?i)(abstract|ๆ่ฆ)", "introduction"), |
| | (r"(?i)(introduction|ๅผ่จ|็ฎไป)", "introduction"), |
| | (r"(?i)(related work|็ธๅ
ณๅทฅไฝ|่ๆฏ)", "background"), |
| | (r"(?i)(method|methodology|approach|ๆนๆณ)", "methodology"), |
| | (r"(?i)(algorithm|็ฎๆณ)", "algorithm"), |
| | (r"(?i)(experiment|ๅฎ้ช|evaluation|่ฏไผฐ)", "experiment"), |
| | (r"(?i)(result|็ปๆ|finding)", "results"), |
| | (r"(?i)(conclusion|็ป่ฎบ|ๆป็ป)", "conclusion"), |
| | (r"(?i)(reference|ๅ่ๆ็ฎ|bibliography)", "references"), |
| | ] |
| |
|
| | current_pos = 0 |
| | for i, (pattern, section_type) in enumerate(section_patterns): |
| | match = re.search(pattern, content[current_pos:], re.IGNORECASE) |
| | if match: |
| | start_pos = current_pos + match.start() |
| |
|
| | |
| | next_pos = len(content) |
| | for next_pattern, _ in section_patterns[i + 1 :]: |
| | next_match = re.search( |
| | next_pattern, content[start_pos + 100 :], re.IGNORECASE |
| | ) |
| | if next_match: |
| | next_pos = start_pos + 100 + next_match.start() |
| | break |
| |
|
| | section_content = content[start_pos:next_pos].strip() |
| | if len(section_content) > 50: |
| | |
| | importance_score = self._calculate_paragraph_importance( |
| | section_content, section_type |
| | ) |
| | content_type = self._classify_content_type( |
| | match.group(1), section_content |
| | ) |
| |
|
| | sections.append( |
| | { |
| | "title": match.group(1), |
| | "content": section_content, |
| | "start_pos": start_pos, |
| | "end_pos": next_pos, |
| | "type": section_type, |
| | "importance_score": importance_score, |
| | "content_type": content_type, |
| | } |
| | ) |
| |
|
| | current_pos = next_pos |
| |
|
| | return sections |
| |
|
| | def _segment_by_semantic_chunks(self, content: str) -> List[DocumentSegment]: |
| | """Segment long documents into semantic chunks""" |
| | |
| | paragraphs = [p.strip() for p in content.split("\n\n") if p.strip()] |
| |
|
| | segments = [] |
| | current_chunk = [] |
| | current_chunk_size = 0 |
| | chunk_size_limit = 3000 |
| | overlap_size = 200 |
| |
|
| | char_pos = 0 |
| |
|
| | for para in paragraphs: |
| | para_size = len(para) |
| |
|
| | |
| | if current_chunk_size + para_size > chunk_size_limit and current_chunk: |
| | chunk_content = "\n\n".join(current_chunk) |
| | |
| | content_type = self._classify_paragraph_type(chunk_content) |
| | importance_score = self._calculate_paragraph_importance( |
| | chunk_content, content_type |
| | ) |
| |
|
| | segment = self._create_enhanced_segment( |
| | chunk_content, |
| | f"Section {len(segments) + 1}", |
| | char_pos - len(chunk_content.encode("utf-8")), |
| | char_pos, |
| | importance_score, |
| | content_type, |
| | ) |
| | segments.append(segment) |
| |
|
| | |
| | overlap_content = ( |
| | chunk_content[-overlap_size:] |
| | if len(chunk_content) > overlap_size |
| | else "" |
| | ) |
| | current_chunk = [overlap_content, para] if overlap_content else [para] |
| | current_chunk_size = len(overlap_content) + para_size |
| | else: |
| | current_chunk.append(para) |
| | current_chunk_size += para_size |
| |
|
| | char_pos += para_size + 2 |
| |
|
| | |
| | if current_chunk: |
| | chunk_content = "\n\n".join(current_chunk) |
| | |
| | content_type = self._classify_paragraph_type(chunk_content) |
| | importance_score = self._calculate_paragraph_importance( |
| | chunk_content, content_type |
| | ) |
| |
|
| | segment = self._create_enhanced_segment( |
| | chunk_content, |
| | f"Section {len(segments) + 1}", |
| | char_pos - len(chunk_content.encode("utf-8")), |
| | char_pos, |
| | importance_score, |
| | content_type, |
| | ) |
| | segments.append(segment) |
| |
|
| | return segments |
| |
|
| | def _segment_by_paragraphs(self, content: str) -> List[DocumentSegment]: |
| | """Simple paragraph-based segmentation for short documents""" |
| | paragraphs = [p.strip() for p in content.split("\n\n") if p.strip()] |
| | segments = [] |
| | char_pos = 0 |
| |
|
| | for i, para in enumerate(paragraphs): |
| | if len(para) > 100: |
| | |
| | content_type = self._classify_paragraph_type(para) |
| | importance_score = self._calculate_paragraph_importance( |
| | para, content_type |
| | ) |
| |
|
| | segment = self._create_enhanced_segment( |
| | para, |
| | f"Paragraph {i + 1}", |
| | char_pos, |
| | char_pos + len(para.encode("utf-8")), |
| | importance_score, |
| | content_type, |
| | ) |
| | segments.append(segment) |
| | char_pos += len(para.encode("utf-8")) + 2 |
| |
|
| | return segments |
| |
|
| | |
| |
|
| | def _identify_algorithm_blocks(self, content: str) -> List[Dict]: |
| | """Identify algorithm blocks and related descriptions""" |
| | algorithm_blocks = [] |
| |
|
| | |
| | algorithm_patterns = [ |
| | r"(?i)(algorithm\s+\d+|procedure\s+\d+|method\s+\d+).*?(?=algorithm\s+\d+|procedure\s+\d+|method\s+\d+|$)", |
| | r"(?i)(input:|output:|returns?:|require:|ensure:).*?(?=\n\s*\n|\n\s*(?:input:|output:|returns?:|require:|ensure:)|$)", |
| | r"(?i)(for\s+each|while|if.*then|repeat.*until).*?(?=\n\s*\n|$)", |
| | r"(?i)(step\s+\d+|phase\s+\d+).*?(?=step\s+\d+|phase\s+\d+|\n\s*\n|$)", |
| | ] |
| |
|
| | for pattern in algorithm_patterns: |
| | matches = re.finditer(pattern, content, re.DOTALL) |
| | for match in matches: |
| | |
| | start = max(0, match.start() - 300) |
| | end = min(len(content), match.end() + 500) |
| |
|
| | |
| | while start > 0 and content[start] not in "\n.!?": |
| | start -= 1 |
| | while end < len(content) and content[end] not in "\n.!?": |
| | end += 1 |
| |
|
| | algorithm_blocks.append( |
| | { |
| | "start_pos": start, |
| | "end_pos": end, |
| | "content": content[start:end].strip(), |
| | "title": self._extract_algorithm_title( |
| | content[match.start() : match.end()] |
| | ), |
| | "importance_score": 0.95, |
| | "content_type": "algorithm", |
| | } |
| | ) |
| |
|
| | return algorithm_blocks |
| |
|
| | def _identify_concept_groups(self, content: str) -> List[Dict]: |
| | """Identify concept definition groups""" |
| | concept_groups = [] |
| |
|
| | |
| | concept_patterns = [ |
| | r"(?i)(definition|define|let|denote|given).*?(?=\n\s*\n|definition|define|let|denote|$)", |
| | r"(?i)(theorem|lemma|proposition|corollary).*?(?=\n\s*\n|theorem|lemma|proposition|corollary|$)", |
| | r"(?i)(notation|symbol|parameter).*?(?=\n\s*\n|notation|symbol|parameter|$)", |
| | ] |
| |
|
| | for pattern in concept_patterns: |
| | matches = re.finditer(pattern, content, re.DOTALL) |
| | for match in matches: |
| | |
| | start = max(0, match.start() - 200) |
| | end = min(len(content), match.end() + 300) |
| |
|
| | concept_groups.append( |
| | { |
| | "start_pos": start, |
| | "end_pos": end, |
| | "content": content[start:end].strip(), |
| | "title": self._extract_concept_title( |
| | content[match.start() : match.end()] |
| | ), |
| | "importance_score": 0.85, |
| | "content_type": "concept", |
| | } |
| | ) |
| |
|
| | return concept_groups |
| |
|
| | def _identify_formula_chains(self, content: str) -> List[Dict]: |
| | """Identify formula derivation chains""" |
| | formula_chains = [] |
| |
|
| | |
| | formula_patterns = [ |
| | r"\$\$.*?\$\$", |
| | r"\$[^$]+\$", |
| | r"(?i)(equation|formula).*?(?=\n\s*\n|equation|formula|$)", |
| | r"(?i)(where|such that|given that).*?(?=\n\s*\n|where|such that|given that|$)", |
| | ] |
| |
|
| | |
| | formula_positions = [] |
| | for pattern in formula_patterns: |
| | matches = re.finditer(pattern, content, re.DOTALL) |
| | for match in matches: |
| | formula_positions.append((match.start(), match.end())) |
| |
|
| | |
| | formula_positions.sort() |
| | if formula_positions: |
| | current_chain_start = formula_positions[0][0] |
| | current_chain_end = formula_positions[0][1] |
| |
|
| | for start, end in formula_positions[1:]: |
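| | # Group formulas that sit within ~500 characters of each other into a single derivation chain. |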
| | if start - current_chain_end < 500: |
| | current_chain_end = end |
| | else: |
| | |
| | formula_chains.append( |
| | { |
| | "start_pos": max(0, current_chain_start - 200), |
| | "end_pos": min(len(content), current_chain_end + 200), |
| | "content": content[ |
| | max(0, current_chain_start - 200) : min( |
| | len(content), current_chain_end + 200 |
| | ) |
| | ].strip(), |
| | "title": "Mathematical Formulation", |
| | "importance_score": 0.9, |
| | "content_type": "formula", |
| | } |
| | ) |
| | current_chain_start = start |
| | current_chain_end = end |
| |
|
| | |
| | formula_chains.append( |
| | { |
| | "start_pos": max(0, current_chain_start - 200), |
| | "end_pos": min(len(content), current_chain_end + 200), |
| | "content": content[ |
| | max(0, current_chain_start - 200) : min( |
| | len(content), current_chain_end + 200 |
| | ) |
| | ].strip(), |
| | "title": "Mathematical Formulation", |
| | "importance_score": 0.9, |
| | "content_type": "formula", |
| | } |
| | ) |
| |
|
| | return formula_chains |
| |
|
| | def _merge_related_content_blocks( |
| | self, |
| | algorithm_blocks: List[Dict], |
| | concept_groups: List[Dict], |
| | formula_chains: List[Dict], |
| | content: str, |
| | ) -> List[Dict]: |
| | """Merge related content blocks to ensure integrity""" |
| | all_blocks = algorithm_blocks + concept_groups + formula_chains |
| | all_blocks.sort(key=lambda x: x["start_pos"]) |
| |
|
| | merged_blocks = [] |
| | i = 0 |
| |
|
| | while i < len(all_blocks): |
| | current_block = all_blocks[i] |
| |
|
| | |
| | while i + 1 < len(all_blocks): |
| | next_block = all_blocks[i + 1] |
| |
|
| | |
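| | # Merge the next block into the current one when they are close together (<300 chars apart) |
| | # or semantically related (e.g. an algorithm block followed by its formulas). |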
| | if next_block["start_pos"] - current_block[ |
| | "end_pos" |
| | ] < 300 or self._are_blocks_related(current_block, next_block): |
| | |
| | merged_content = content[ |
| | current_block["start_pos"] : next_block["end_pos"] |
| | ] |
| | current_block = { |
| | "start_pos": current_block["start_pos"], |
| | "end_pos": next_block["end_pos"], |
| | "content": merged_content.strip(), |
| | "title": f"{current_block['title']} & {next_block['title']}", |
| | "importance_score": max( |
| | current_block["importance_score"], |
| | next_block["importance_score"], |
| | ), |
| | "content_type": "merged", |
| | } |
| | i += 1 |
| | else: |
| | break |
| |
|
| | merged_blocks.append(current_block) |
| | i += 1 |
| |
|
| | return merged_blocks |
| |
|
| | def _are_blocks_related(self, block1: Dict, block2: Dict) -> bool: |
| | """Determine if two content blocks are related""" |
| | |
| | related_types = [ |
| | ("algorithm", "formula"), |
| | ("concept", "algorithm"), |
| | ("formula", "concept"), |
| | ] |
| |
|
| | for type1, type2 in related_types: |
| | if ( |
| | block1["content_type"] == type1 and block2["content_type"] == type2 |
| | ) or (block1["content_type"] == type2 and block2["content_type"] == type1): |
| | return True |
| |
|
| | return False |
| |
|
| | def _extract_algorithm_title(self, text: str) -> str: |
| | """Extract title from algorithm text""" |
| | lines = text.split("\n")[:3] |
| | for line in lines: |
| | line = line.strip() |
| | if line and len(line) < 100: |
| | |
| | title = re.sub(r"[^\w\s-]", "", line) |
| | if title: |
| | return title[:50] |
| | return "Algorithm Block" |
| |
|
| | def _extract_concept_title(self, text: str) -> str: |
| | """Extract title from concept text""" |
| | lines = text.split("\n")[:2] |
| | for line in lines: |
| | line = line.strip() |
| | if line and len(line) < 80: |
| | title = re.sub(r"[^\w\s-]", "", line) |
| | if title: |
| | return title[:50] |
| | return "Concept Definition" |
| |
|
| | def _create_enhanced_segment( |
| | self, |
| | content: str, |
| | title: str, |
| | start_pos: int, |
| | end_pos: int, |
| | importance_score: float, |
| | content_type: str, |
| | ) -> DocumentSegment: |
| | """Create enhanced document segment""" |
| | |
| | segment_id = hashlib.md5( |
| | f"{title}_{start_pos}_{end_pos}_{importance_score}".encode() |
| | ).hexdigest()[:8] |
| |
|
| | |
| | keywords = self._extract_enhanced_keywords(content, content_type) |
| |
|
| | |
| | relevance_scores = self._calculate_enhanced_relevance_scores( |
| | content, content_type, importance_score |
| | ) |
| |
|
| | return DocumentSegment( |
| | id=segment_id, |
| | title=title, |
| | content=content, |
| | content_type=content_type, |
| | keywords=keywords, |
| | char_start=start_pos, |
| | char_end=end_pos, |
| | char_count=len(content), |
| | relevance_scores=relevance_scores, |
| | section_path=title, |
| | ) |
| |
|
| | def _extract_enhanced_keywords(self, content: str, content_type: str) -> List[str]: |
| | """Extract enhanced keywords based on content type""" |
| | words = re.findall(r"\b[a-zA-Z]{3,}\b", content.lower()) |
| |
|
| | |
| | if content_type == "algorithm": |
| | algorithm_stopwords = { |
| | "step", |
| | "then", |
| | "else", |
| | "end", |
| | "begin", |
| | "start", |
| | "stop", |
| | } |
| | words = [w for w in words if w not in algorithm_stopwords] |
| | elif content_type == "formula": |
| | formula_keywords = ["equation", "formula", "where", "given", "such", "that"] |
| | words.extend(formula_keywords) |
| |
|
| | |
| | general_stopwords = { |
| | "the", "and", "for", "are", "but", "not", "you", "all", "can", "her", |
| | "was", "one", "our", "had", "have", "this", "that", "with", "from", |
| | "they", "she", "been", "were", "said", "each", "which", "their", |
| | } |
| |
|
| | keywords = [w for w in set(words) if w not in general_stopwords and len(w) > 3] |
| | return keywords[:25] |
| |
|
| | def _calculate_enhanced_relevance_scores( |
| | self, content: str, content_type: str, importance_score: float |
| | ) -> Dict[str, float]: |
| | """Calculate enhanced relevance scores""" |
| | content_lower = content.lower() |
| |
|
| | base_scores = { |
| | "concept_analysis": 0.5, |
| | "algorithm_extraction": 0.5, |
| | "code_planning": 0.5, |
| | } |
| |
|
| | |
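| | # A segment's own content type drives its primary query score; the other query types |
| | # receive a scaled-down share so related queries can still surface the segment. |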
| | if content_type == "algorithm": |
| | base_scores["algorithm_extraction"] = importance_score |
| | base_scores["code_planning"] = importance_score * 0.9 |
| | base_scores["concept_analysis"] = importance_score * 0.7 |
| | elif content_type == "concept": |
| | base_scores["concept_analysis"] = importance_score |
| | base_scores["algorithm_extraction"] = importance_score * 0.8 |
| | base_scores["code_planning"] = importance_score * 0.6 |
| | elif content_type == "formula": |
| | base_scores["algorithm_extraction"] = importance_score |
| | base_scores["concept_analysis"] = importance_score * 0.8 |
| | base_scores["code_planning"] = importance_score * 0.9 |
| | elif content_type == "merged": |
| | |
| | base_scores = {k: importance_score * 0.95 for k in base_scores} |
| |
|
| | |
| | algorithm_indicators = ["algorithm", "method", "procedure", "step", "process"] |
| | concept_indicators = ["definition", "concept", "framework", "approach"] |
| | implementation_indicators = ["implementation", "code", "function", "design"] |
| |
|
| | for query_type, indicators in [ |
| | ("algorithm_extraction", algorithm_indicators), |
| | ("concept_analysis", concept_indicators), |
| | ("code_planning", implementation_indicators), |
| | ]: |
| | density_bonus = ( |
| | sum(1 for indicator in indicators if indicator in content_lower) * 0.1 |
| | ) |
| | base_scores[query_type] = min(1.0, base_scores[query_type] + density_bonus) |
| |
|
| | return base_scores |
| |
|
| | |
| | def _identify_research_paper_sections(self, content: str) -> List[Dict]: |
| | """Identify research paper sections - simplified implementation""" |
| | |
| | return self._detect_academic_sections(content) |
| |
|
| | def _enhance_section_with_context(self, section: Dict, content: str) -> Dict: |
| | """Add context to sections - simplified implementation""" |
| | return section |
| |
|
| | def _identify_concept_implementation_pairs(self, content: str) -> List[Dict]: |
| | """Identify concept-implementation pairs - simplified implementation""" |
| | return [] |
| |
|
| | def _merge_concept_with_implementation(self, pair: Dict, content: str) -> Dict: |
| | """Merge concepts with implementation - simplified implementation""" |
| | return pair |
| |
|
| | def _detect_semantic_boundaries(self, content: str) -> List[Dict]: |
| | """Detect semantic boundaries - based on paragraphs and logical separators""" |
| | boundaries = [] |
| |
|
| | |
| | paragraphs = content.split("\n\n") |
| | current_pos = 0 |
| |
|
| | for i, para in enumerate(paragraphs): |
| | if len(para.strip()) > 100: |
| | |
| | content_type = self._classify_paragraph_type(para) |
| | importance_score = self._calculate_paragraph_importance( |
| | para, content_type |
| | ) |
| |
|
| | boundaries.append( |
| | { |
| | "position": current_pos + len(para), |
| | "suggested_title": self._extract_paragraph_title(para, i + 1), |
| | "importance_score": importance_score, |
| | "content_type": content_type, |
| | } |
| | ) |
| |
|
| | current_pos += len(para) + 2 |
| |
|
| | return boundaries |
| |
|
| | def _classify_paragraph_type(self, paragraph: str) -> str: |
| | """Classify paragraph type""" |
| | para_lower = paragraph.lower() |
| |
|
| | if "algorithm" in para_lower or "procedure" in para_lower: |
| | return "algorithm" |
| | elif "formula" in para_lower or "$$" in paragraph: |
| | return "formula" |
| | elif any( |
| | word in para_lower for word in ["introduction", "overview", "abstract"] |
| | ): |
| | return "introduction" |
| | elif any(word in para_lower for word in ["conclusion", "summary", "result"]): |
| | return "conclusion" |
| | else: |
| | return "general" |
| |
|
| | def _calculate_paragraph_importance( |
| | self, paragraph: str, content_type: str |
| | ) -> float: |
| | """Calculate paragraph importance""" |
| | if content_type == "algorithm": |
| | return 0.95 |
| | elif content_type == "formula": |
| | return 0.9 |
| | elif content_type == "introduction": |
| | return 0.85 |
| | elif content_type == "conclusion": |
| | return 0.8 |
| | else: |
| | return 0.7 |
| |
|
| | def _extract_paragraph_title(self, paragraph: str, index: int) -> str: |
| | """Extract paragraph title""" |
| | lines = paragraph.split("\n") |
| | for line in lines[:2]: |
| | if line.startswith("#"): |
| | return line.strip("# ") |
| | elif len(line) < 80 and line.strip(): |
| | return line.strip() |
| | return f"Section {index}" |
| |
|
| | def _calculate_optimal_chunk_size(self, content: str) -> int: |
| | """Calculate optimal chunk size""" |
| | |
| | complexity = self.analyzer._calculate_concept_complexity(content) |
| | if complexity > 0.7: |
| | return 4000 |
| | elif complexity > 0.4: |
| | return 3000 |
| | else: |
| | return 2000 |
| |
|
| | def _create_content_aware_chunks(self, content: str, chunk_size: int) -> List[Dict]: |
| | """Create content-aware chunks - simplified implementation""" |
| | chunks = [] |
| | paragraphs = [p.strip() for p in content.split("\n\n") if p.strip()] |
| |
|
| | current_chunk = [] |
| | current_size = 0 |
| | start_pos = 0 |
| |
|
| | for para in paragraphs: |
| | para_size = len(para) |
| |
|
| | if current_size + para_size > chunk_size and current_chunk: |
| | chunk_content = "\n\n".join(current_chunk) |
| | chunks.append( |
| | { |
| | "content": chunk_content, |
| | "title": f"Section {len(chunks) + 1}", |
| | "start_pos": start_pos, |
| | "end_pos": start_pos + len(chunk_content), |
| | "importance_score": 0.7, |
| | "content_type": "general", |
| | } |
| | ) |
| |
|
| | current_chunk = [para] |
| | current_size = para_size |
| | start_pos += len(chunk_content) + 2 |
| | else: |
| | current_chunk.append(para) |
| | current_size += para_size |
| |
|
| | |
| | if current_chunk: |
| | chunk_content = "\n\n".join(current_chunk) |
| | chunks.append( |
| | { |
| | "content": chunk_content, |
| | "title": f"Section {len(chunks) + 1}", |
| | "start_pos": start_pos, |
| | "end_pos": start_pos + len(chunk_content), |
| | "importance_score": 0.7, |
| | "content_type": "general", |
| | } |
| | ) |
| |
|
| | return chunks |
| |
|
| | def _create_segment( |
| | self, content: str, title: str, start_pos: int, end_pos: int |
| | ) -> DocumentSegment: |
| | """Create a DocumentSegment with metadata""" |
| | |
| | segment_id = hashlib.md5(f"{title}_{start_pos}_{end_pos}".encode()).hexdigest()[ |
| | :8 |
| | ] |
| |
|
| | |
| | keywords = self._extract_keywords(content) |
| |
|
| | |
| | content_type = self._classify_content_type(title, content) |
| |
|
| | |
| | relevance_scores = self._calculate_relevance_scores(content, content_type) |
| |
|
| | return DocumentSegment( |
| | id=segment_id, |
| | title=title, |
| | content=content, |
| | content_type=content_type, |
| | keywords=keywords, |
| | char_start=start_pos, |
| | char_end=end_pos, |
| | char_count=len(content), |
| | relevance_scores=relevance_scores, |
| | section_path=title, |
| | ) |
| |
|
| | def _extract_keywords(self, content: str) -> List[str]: |
| | """Extract relevant keywords from content""" |
| | |
| | words = re.findall(r"\b[a-zA-Z]{3,}\b", content.lower()) |
| |
|
| | |
| | stopwords = { |
| | "the", "and", "for", "are", "but", "not", "you", "all", "can", "her", |
| | "was", "one", "our", "had", "have", "this", "that", "with", "from", |
| | "they", "she", "been", "were", "said", "each", "which", "their", |
| | } |
| |
|
| | keywords = [w for w in set(words) if w not in stopwords and len(w) > 3] |
| | return keywords[:20] |
| |
|
| | def _classify_content_type(self, title: str, content: str) -> str: |
| | """Classify the type of content based on title and content""" |
| | title_lower = title.lower() |
| | content_lower = content.lower() |
| |
|
| | if any( |
| | word in title_lower for word in ["introduction", "abstract", "overview"] |
| | ): |
| | return "introduction" |
| | elif any(word in title_lower for word in ["method", "approach", "algorithm"]): |
| | return "methodology" |
| | elif any( |
| | word in title_lower for word in ["experiment", "evaluation", "result"] |
| | ): |
| | return "experiment" |
| | elif any( |
| | word in title_lower for word in ["conclusion", "discussion", "summary"] |
| | ): |
| | return "conclusion" |
| | elif any(word in title_lower for word in ["reference", "bibliography"]): |
| | return "references" |
| | elif "algorithm" in content_lower or "procedure" in content_lower: |
| | return "algorithm" |
| | else: |
| | return "general" |
| |
|
| | def _calculate_relevance_scores( |
| | self, content: str, content_type: str |
| | ) -> Dict[str, float]: |
| | """Calculate relevance scores for different query types""" |
| | content_lower = content.lower() |
| |
|
| | scores = { |
| | "concept_analysis": 0.5, |
| | "algorithm_extraction": 0.5, |
| | "code_planning": 0.5, |
| | } |
| |
|
| | |
| | concept_indicators = [ |
| | "introduction", |
| | "overview", |
| | "architecture", |
| | "system", |
| | "framework", |
| | "concept", |
| | "approach", |
| | ] |
| | concept_score = sum( |
| | 1 for indicator in concept_indicators if indicator in content_lower |
| | ) / len(concept_indicators) |
| | scores["concept_analysis"] = min( |
| | 1.0, concept_score + (0.8 if content_type == "introduction" else 0) |
| | ) |
| |
|
| | |
| | algorithm_indicators = [ |
| | "algorithm", |
| | "method", |
| | "procedure", |
| | "formula", |
| | "equation", |
| | "step", |
| | "process", |
| | ] |
| | algorithm_score = sum( |
| | 1 for indicator in algorithm_indicators if indicator in content_lower |
| | ) / len(algorithm_indicators) |
| | scores["algorithm_extraction"] = min( |
| | 1.0, algorithm_score + (0.9 if content_type == "methodology" else 0) |
| | ) |
| |
|
| | |
| | code_indicators = [ |
| | "implementation", |
| | "code", |
| | "function", |
| | "class", |
| | "module", |
| | "structure", |
| | "design", |
| | ] |
| | code_score = sum( |
| | 1 for indicator in code_indicators if indicator in content_lower |
| | ) / len(code_indicators) |
| | scores["code_planning"] = min( |
| | 1.0, |
| | code_score + (0.7 if content_type in ["methodology", "algorithm"] else 0), |
| | ) |
| |
|
| | return scores |
| |
|
| |
|
| | |
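| | # In-memory cache of analyzed documents, keyed by paper directory. The persistent copy |
| | # lives in <paper_dir>/document_segments/document_index.json. |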
| | DOCUMENT_INDEXES: Dict[str, DocumentIndex] = {} |
| | segmenter = DocumentSegmenter() |
| |
|
| |
|
| | def get_segments_dir(paper_dir: str) -> str: |
| | """Get the segments directory path""" |
| | return os.path.join(paper_dir, "document_segments") |
| |
|
| |
|
| | def ensure_segments_dir_exists(segments_dir: str): |
| | """Ensure segments directory exists""" |
| | os.makedirs(segments_dir, exist_ok=True) |
| |
|
| |
|
| | @mcp.tool() |
| | async def analyze_and_segment_document( |
| | paper_dir: str, force_refresh: bool = False |
| | ) -> str: |
| | """ |
| | Analyze document structure and create intelligent segments |
| | |
| | Args: |
| | paper_dir: Path to the paper directory |
| | force_refresh: Whether to force re-analysis even if segments exist |
| | |
| | Returns: |
| | JSON string with segmentation results |
| | """ |
| | try: |
| | |
| | md_files = [f for f in os.listdir(paper_dir) if f.endswith(".md")] |
| | if not md_files: |
| | return json.dumps( |
| | { |
| | "status": "error", |
| | "message": f"No markdown file found in {paper_dir}", |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| | md_file_path = os.path.join(paper_dir, md_files[0]) |
| | segments_dir = get_segments_dir(paper_dir) |
| | index_file_path = os.path.join(segments_dir, "document_index.json") |
| |
|
| | |
| | if not force_refresh and os.path.exists(index_file_path): |
| | try: |
| | with open(index_file_path, "r", encoding="utf-8") as f: |
| | existing_index = json.load(f) |
| |
|
| | |
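| | # Backward compatibility: older indexes may lack newer segment fields, so fill in |
| | # sensible defaults before rebuilding the DocumentSegment objects. |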
| | if "segments" in existing_index: |
| | segments_data = [] |
| | for seg_data in existing_index["segments"]: |
| | |
| | segment_dict = dict(seg_data) |
| |
|
| | if "content_type" not in segment_dict: |
| | segment_dict["content_type"] = "general" |
| | if "keywords" not in segment_dict: |
| | segment_dict["keywords"] = [] |
| | if "relevance_scores" not in segment_dict: |
| | segment_dict["relevance_scores"] = { |
| | "concept_analysis": 0.5, |
| | "algorithm_extraction": 0.5, |
| | "code_planning": 0.5, |
| | } |
| | if "section_path" not in segment_dict: |
| | segment_dict["section_path"] = segment_dict.get( |
| | "title", "Unknown" |
| | ) |
| |
|
| | segments_data.append(DocumentSegment(**segment_dict)) |
| |
|
| | existing_index["segments"] = segments_data |
| |
|
| | DOCUMENT_INDEXES[paper_dir] = DocumentIndex(**existing_index) |
| | return json.dumps( |
| | { |
| | "status": "success", |
| | "message": "Using existing document analysis", |
| | "segments_dir": segments_dir, |
| | "total_segments": existing_index["total_segments"], |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| | except Exception as e: |
| | logger.error(f"Failed to load existing index: {e}") |
| | logger.info("Will perform fresh analysis instead") |
| | |
| | try: |
| | os.remove(index_file_path) |
| | except Exception: |
| | pass |
| |
|
| | |
| | with open(md_file_path, "r", encoding="utf-8") as f: |
| | content = f.read() |
| |
|
| | |
| | analyzer = DocumentAnalyzer() |
| | doc_type, confidence = analyzer.analyze_document_type(content) |
| | strategy = analyzer.detect_segmentation_strategy(content, doc_type) |
| |
|
| | |
| | segments = segmenter.segment_document(content, strategy) |
| |
|
| | |
| | document_index = DocumentIndex( |
| | document_path=md_file_path, |
| | document_type=doc_type, |
| | segmentation_strategy=strategy, |
| | total_segments=len(segments), |
| | total_chars=len(content), |
| | segments=segments, |
| | created_at=datetime.now().isoformat(), |
| | ) |
| |
|
| | |
| | ensure_segments_dir_exists(segments_dir) |
| |
|
| | |
| | with open(index_file_path, "w", encoding="utf-8") as f: |
| | json.dump( |
| | asdict(document_index), f, ensure_ascii=False, indent=2, default=str |
| | ) |
| |
|
| | |
| | for segment in segments: |
| | segment_file_path = os.path.join(segments_dir, f"segment_{segment.id}.md") |
| | with open(segment_file_path, "w", encoding="utf-8") as f: |
| | f.write(f"# {segment.title}\n\n") |
| | f.write(f"**Content Type:** {segment.content_type}\n") |
| | f.write(f"**Keywords:** {', '.join(segment.keywords[:10])}\n\n") |
| | f.write(segment.content) |
| |
|
| | |
| | DOCUMENT_INDEXES[paper_dir] = document_index |
| |
|
| | logger.info( |
| | f"Document segmentation completed: {len(segments)} segments created" |
| | ) |
| |
|
| | return json.dumps( |
| | { |
| | "status": "success", |
| | "message": f"Document analysis completed with {strategy} strategy", |
| | "document_type": doc_type, |
| | "segmentation_strategy": strategy, |
| | "segments_dir": segments_dir, |
| | "total_segments": len(segments), |
| | "total_chars": len(content), |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| | except Exception as e: |
| | logger.error(f"Error in analyze_and_segment_document: {e}") |
| | return json.dumps( |
| | {"status": "error", "message": f"Failed to analyze document: {str(e)}"}, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| |
|
| | @mcp.tool() |
| | async def read_document_segments( |
| | paper_dir: str, |
| | query_type: str, |
| | keywords: List[str] = None, |
| | max_segments: int = 3, |
| | max_total_chars: int = None, |
| | ) -> str: |
| | """ |
| | Intelligently retrieve relevant document segments based on query type |
| | |
| | Args: |
| | paper_dir: Path to the paper directory |
| | query_type: Type of query - "concept_analysis", "algorithm_extraction", or "code_planning" |
| | keywords: Optional list of keywords to search for |
| | max_segments: Maximum number of segments to return |
| | max_total_chars: Maximum total characters to return |
| | |
| | Returns: |
| | JSON string with selected segments |
| | """ |
| | try: |
| | |
| | if paper_dir not in DOCUMENT_INDEXES: |
| | segments_dir = get_segments_dir(paper_dir) |
| | index_file_path = os.path.join(segments_dir, "document_index.json") |
| |
|
| | if os.path.exists(index_file_path): |
| | with open(index_file_path, "r", encoding="utf-8") as f: |
| | index_data = json.load(f) |
| | |
| | segments_data = [] |
| | for seg_data in index_data.get("segments", []): |
| | |
| | segment_dict = dict(seg_data) |
| |
|
| | |
| | if "content_type" not in segment_dict: |
| | segment_dict["content_type"] = "general" |
| | if "keywords" not in segment_dict: |
| | segment_dict["keywords"] = [] |
| | if "relevance_scores" not in segment_dict: |
| | segment_dict["relevance_scores"] = { |
| | "concept_analysis": 0.5, |
| | "algorithm_extraction": 0.5, |
| | "code_planning": 0.5, |
| | } |
| | if "section_path" not in segment_dict: |
| | segment_dict["section_path"] = segment_dict.get( |
| | "title", "Unknown" |
| | ) |
| |
|
| | segment = DocumentSegment(**segment_dict) |
| | segments_data.append(segment) |
| |
|
| | index_data["segments"] = segments_data |
| | DOCUMENT_INDEXES[paper_dir] = DocumentIndex(**index_data) |
| | else: |
| | |
| | await analyze_and_segment_document(paper_dir) |
| |
|
| | document_index = DOCUMENT_INDEXES[paper_dir] |
| |
|
| | |
| | if max_total_chars is None: |
| | max_total_chars = _calculate_adaptive_char_limit(document_index, query_type) |
| |
|
| | |
| | scored_segments = [] |
| | for segment in document_index.segments: |
| | |
| | relevance_score = segment.relevance_scores.get(query_type, 0.5) |
| |
|
| | |
| | if keywords: |
| | keyword_score = _calculate_enhanced_keyword_score(segment, keywords) |
| | relevance_score += keyword_score |
| |
|
| | |
| | completeness_bonus = _calculate_completeness_bonus(segment, document_index) |
| | relevance_score += completeness_bonus |
| |
|
| | scored_segments.append((segment, relevance_score)) |
| |
|
| | |
| | scored_segments.sort(key=lambda x: x[1], reverse=True) |
| |
|
| | |
| | selected_segments = _select_segments_with_integrity( |
| | scored_segments, max_segments, max_total_chars, query_type |
| | ) |
| |
|
| | total_chars = sum(seg["char_count"] for seg in selected_segments) |
| |
|
| | logger.info( |
| | f"Selected {len(selected_segments)} segments for {query_type} query" |
| | ) |
| |
|
| | return json.dumps( |
| | { |
| | "status": "success", |
| | "query_type": query_type, |
| | "keywords": keywords or [], |
| | "total_segments_available": len(document_index.segments), |
| | "segments_selected": len(selected_segments), |
| | "total_chars": total_chars, |
| | "max_chars_used": max_total_chars, |
| | "segments": selected_segments, |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| | except Exception as e: |
| | logger.error(f"Error in read_document_segments: {e}") |
| | return json.dumps( |
| | { |
| | "status": "error", |
| | "message": f"Failed to read document segments: {str(e)}", |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| |
|
| | @mcp.tool() |
| | async def get_document_overview(paper_dir: str) -> str: |
| | """ |
| | Get overview of document structure and available segments |
| | |
| | Args: |
| | paper_dir: Path to the paper directory |
| | |
| | Returns: |
| | JSON string with document overview |
| | """ |
| | try: |
| | |
| | if paper_dir not in DOCUMENT_INDEXES: |
| | await analyze_and_segment_document(paper_dir) |
| |
|
| | document_index = DOCUMENT_INDEXES[paper_dir] |
| |
|
| | |
| | segment_summaries = [] |
| | for segment in document_index.segments: |
| | segment_summaries.append( |
| | { |
| | "id": segment.id, |
| | "title": segment.title, |
| | "content_type": segment.content_type, |
| | "char_count": segment.char_count, |
| | "keywords": segment.keywords[:5], |
| | "relevance_scores": segment.relevance_scores, |
| | } |
| | ) |
| |
|
| | return json.dumps( |
| | { |
| | "status": "success", |
| | "document_path": document_index.document_path, |
| | "document_type": document_index.document_type, |
| | "segmentation_strategy": document_index.segmentation_strategy, |
| | "total_segments": document_index.total_segments, |
| | "total_chars": document_index.total_chars, |
| | "created_at": document_index.created_at, |
| | "segments_overview": segment_summaries, |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| | except Exception as e: |
| | logger.error(f"Error in get_document_overview: {e}") |
| | return json.dumps( |
| | { |
| | "status": "error", |
| | "message": f"Failed to get document overview: {str(e)}", |
| | }, |
| | ensure_ascii=False, |
| | indent=2, |
| | ) |
| |
|
| |
|
| | |
| |
|
| |
|
| | def _calculate_adaptive_char_limit( |
| | document_index: DocumentIndex, query_type: str |
| | ) -> int: |
| | """Dynamically calculate character limit based on document complexity and query type""" |
| | base_limit = 6000 |
| |
|
| | |
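| | # Base budget grows with document complexity: research papers and algorithm-heavy |
| | # documents get a larger default character window. |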
| | if document_index.document_type == "research_paper": |
| | base_limit = 10000 |
| | elif document_index.document_type == "algorithm_focused": |
| | base_limit = 12000 |
| | elif document_index.segmentation_strategy == "algorithm_preserve_integrity": |
| | base_limit = 15000 |
| |
|
| | |
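| | # Query-type multipliers: algorithm extraction benefits most from extra context, |
| | # code planning somewhat less, concept analysis least. |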
| | query_multipliers = { |
| | "algorithm_extraction": 1.5, |
| | "concept_analysis": 1.2, |
| | "code_planning": 1.3, |
| | } |
| |
|
| | multiplier = query_multipliers.get(query_type, 1.0) |
| | return int(base_limit * multiplier) |
| |
|
| |
|
| | def _calculate_enhanced_keyword_score( |
| | segment: DocumentSegment, keywords: List[str] |
| | ) -> float: |
| | """Calculate enhanced keyword matching score""" |
| | score = 0.0 |
| | content_lower = segment.content.lower() |
| | title_lower = segment.title.lower() |
| |
|
| | for keyword in keywords: |
| | keyword_lower = keyword.lower() |
| |
|
| | |
| | if keyword_lower in title_lower: |
| | score += 0.3 |
| |
|
| | |
| | content_matches = content_lower.count(keyword_lower) |
| | if content_matches > 0: |
| | |
| | frequency_score = min(0.2, content_matches * 0.05) |
| |
|
| | |
| | early_content = content_lower[: len(content_lower) // 4] |
| | if keyword_lower in early_content: |
| | frequency_score += 0.1 |
| |
|
| | score += frequency_score |
| |
|
| | return min(0.6, score) |
| |
|
| |
|
| | def _calculate_completeness_bonus( |
| | segment: DocumentSegment, document_index: DocumentIndex |
| | ) -> float: |
| | """Calculate content completeness bonus""" |
| | bonus = 0.0 |
| |
|
| | |
| | if segment.content_type in ["algorithm", "formula", "merged"]: |
| | bonus += 0.2 |
| |
|
| | |
| | if segment.char_count > 4000: |
| | bonus += 0.15 |
| | elif segment.char_count > 2000: |
| | bonus += 0.1 |
| |
|
| | |
| | if segment.relevance_scores.get("algorithm_extraction", 0) > 0.8: |
| | bonus += 0.1 |
| |
|
| | return min(0.3, bonus) |
| |
|
| |
|
| | def _select_segments_with_integrity( |
| | scored_segments: List[Tuple], |
| | max_segments: int, |
| | max_total_chars: int, |
| | query_type: str, |
| | ) -> List[Dict]: |
| | """Intelligently select segments while maintaining content integrity""" |
| | selected_segments = [] |
| | total_chars = 0 |
| |
|
| | |
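| | # First pass: take segments in relevance order while they fit the character budget; |
| | # if even the top-ranked segment is too large, fall back to a truncated copy of it. |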
| | for segment, score in scored_segments: |
| | if len(selected_segments) >= max_segments: |
| | break |
| |
|
| | if total_chars + segment.char_count <= max_total_chars: |
| | selected_segments.append( |
| | { |
| | "id": segment.id, |
| | "title": segment.title, |
| | "content": segment.content, |
| | "content_type": segment.content_type, |
| | "relevance_score": score, |
| | "char_count": segment.char_count, |
| | } |
| | ) |
| | total_chars += segment.char_count |
| | elif len(selected_segments) == 0: |
| | |
| | truncated_content = ( |
| | segment.content[: max_total_chars - 200] |
| | + "\n\n[Content truncated for length...]" |
| | ) |
| | selected_segments.append( |
| | { |
| | "id": segment.id, |
| | "title": segment.title, |
| | "content": truncated_content, |
| | "content_type": segment.content_type, |
| | "relevance_score": score, |
| | "char_count": len(truncated_content), |
| | } |
| | ) |
| | break |
| |
|
| | |
| | remaining_chars = max_total_chars - total_chars |
| | if remaining_chars > 500 and len(selected_segments) < max_segments: |
| | # Second pass: fill any leftover budget with smaller, lower-ranked segments, |
| | # skipping segments that were already selected in the first pass. |
| | selected_ids = {seg["id"] for seg in selected_segments} |
| | for segment, score in scored_segments: |
| | if segment.id in selected_ids or segment.char_count > remaining_chars: |
| | continue |
| | if len(selected_segments) >= max_segments: |
| | break |
| | selected_segments.append( |
| | { |
| | "id": segment.id, |
| | "title": segment.title, |
| | "content": segment.content, |
| | "content_type": segment.content_type, |
| | "relevance_score": score, |
| | "char_count": segment.char_count, |
| | } |
| | ) |
| | remaining_chars -= segment.char_count |
| |
|
| | return selected_segments |
| |
|
| |
|
| | if __name__ == "__main__": |
| | |
| | mcp.run() |
| |
|