| | """ |
| | File processing utilities for handling paper files and related operations. |
| | """ |
| |
|
| | import json |
| | import os |
| | import re |
| | from typing import Dict, List, Optional, Union |
| |
|
| |
|
class FileProcessor:
    """
    A class to handle file processing operations including path extraction and file reading.

    All methods are declared as ``@staticmethod`` or ``@classmethod``; the
    class serves as a namespace and is never instantiated.
    """
|
| | @staticmethod |
| | def extract_file_path(file_info: Union[str, Dict]) -> Optional[str]: |
| | """ |
| | Extract paper directory path from the input information. |
| | |
| | Args: |
| | file_info: Either a JSON string or a dictionary containing file information |
| | |
| | Returns: |
| | Optional[str]: The extracted paper directory path or None if not found |
| | """ |
| | try: |
| | |
| | if isinstance(file_info, str): |
| | |
| | if file_info.endswith( |
| | (".md", ".pdf", ".txt", ".docx", ".doc", ".html", ".htm") |
| | ): |
| | |
| | return os.path.dirname(os.path.abspath(file_info)) |
| | elif os.path.exists(file_info): |
| | if os.path.isfile(file_info): |
| | return os.path.dirname(os.path.abspath(file_info)) |
| | elif os.path.isdir(file_info): |
| | return os.path.abspath(file_info) |
| |
|
| | |
| | try: |
| | info_dict = json.loads(file_info) |
| | except json.JSONDecodeError: |
| | |
| | info_dict = FileProcessor.extract_json_from_text(file_info) |
| | if not info_dict: |
| | |
| | raise ValueError( |
| | f"Input is neither a valid file path nor JSON: {file_info}" |
| | ) |
| | else: |
| | info_dict = file_info |
| |
|
| | |
| | paper_path = info_dict.get("paper_path") |
| | if not paper_path: |
| | raise ValueError("No paper_path found in input dictionary") |
| |
|
| | |
| | paper_dir = os.path.dirname(paper_path) |
| |
|
| | |
| | if not os.path.isabs(paper_dir): |
| | paper_dir = os.path.abspath(paper_dir) |
| |
|
| | return paper_dir |
| |
|
| | except (AttributeError, TypeError) as e: |
| | raise ValueError(f"Invalid input format: {str(e)}") |
| |
|
| | @staticmethod |
| | def find_markdown_file(directory: str) -> Optional[str]: |
| | """ |
| | Find the first markdown file in the given directory. |
| | |
| | Args: |
| | directory: Directory path to search |
| | |
| | Returns: |
| | Optional[str]: Path to the markdown file or None if not found |
| | """ |
| | if not os.path.isdir(directory): |
| | return None |
| |
|
| | for file in os.listdir(directory): |
| | if file.endswith(".md"): |
| | return os.path.join(directory, file) |
| | return None |
| |
|
| | @staticmethod |
| | def parse_markdown_sections(content: str) -> List[Dict[str, Union[str, int, List]]]: |
| | """ |
| | Parse markdown content and organize it by sections based on headers. |
| | |
| | Args: |
| | content: The markdown content to parse |
| | |
| | Returns: |
| | List[Dict]: A list of sections, each containing: |
| | - level: The header level (1-6) |
| | - title: The section title |
| | - content: The section content |
| | - subsections: List of subsections |
| | """ |
| | |
| | lines = content.split("\n") |
| | sections = [] |
| | current_section = None |
| | current_content = [] |
| |
|
| | for line in lines: |
| | |
| | header_match = re.match(r"^(#{1,6})\s+(.+)$", line) |
| |
|
| | if header_match: |
| | |
| | if current_section is not None: |
| | current_section["content"] = "\n".join(current_content).strip() |
| | sections.append(current_section) |
| |
|
| | |
| | level = len(header_match.group(1)) |
| | title = header_match.group(2).strip() |
| | current_section = { |
| | "level": level, |
| | "title": title, |
| | "content": "", |
| | "subsections": [], |
| | } |
| | current_content = [] |
| | elif current_section is not None: |
| | current_content.append(line) |
| |
|
| | |
| | if current_section is not None: |
| | current_section["content"] = "\n".join(current_content).strip() |
| | sections.append(current_section) |
| |
|
| | return FileProcessor._organize_sections(sections) |
| |
|
| | @staticmethod |
| | def _organize_sections(sections: List[Dict]) -> List[Dict]: |
| | """ |
| | Organize sections into a hierarchical structure based on their levels. |
| | |
| | Args: |
| | sections: List of sections with their levels |
| | |
| | Returns: |
| | List[Dict]: Organized hierarchical structure of sections |
| | """ |
| | result = [] |
| | section_stack = [] |
| |
|
| | for section in sections: |
| | while section_stack and section_stack[-1]["level"] >= section["level"]: |
| | section_stack.pop() |
| |
|
| | if section_stack: |
| | section_stack[-1]["subsections"].append(section) |
| | else: |
| | result.append(section) |
| |
|
| | section_stack.append(section) |
| |
|
| | return result |
| |
|
| | @staticmethod |
| | async def read_file_content(file_path: str) -> str: |
| | """ |
| | Read the content of a file asynchronously. |
| | |
| | Args: |
| | file_path: Path to the file to read |
| | |
| | Returns: |
| | str: The content of the file |
| | |
| | Raises: |
| | FileNotFoundError: If the file doesn't exist |
| | IOError: If there's an error reading the file |
| | """ |
| | try: |
| | |
| | if not os.path.exists(file_path): |
| | raise FileNotFoundError(f"File not found: {file_path}") |
| |
|
| | |
| | with open(file_path, "rb") as f: |
| | header = f.read(8) |
| | if header.startswith(b"%PDF"): |
| | raise IOError( |
| | f"File {file_path} is a PDF file, not a text file. Please convert it to markdown format or use PDF processing tools." |
| | ) |
| |
|
| | |
| | |
| | |
| | with open(file_path, "r", encoding="utf-8") as f: |
| | content = f.read() |
| |
|
| | return content |
| |
|
| | except UnicodeDecodeError as e: |
| | raise IOError( |
| | f"Error reading file {file_path}: File encoding is not UTF-8. Original error: {str(e)}" |
| | ) |
| | except Exception as e: |
| | raise IOError(f"Error reading file {file_path}: {str(e)}") |
| |
|
| | @staticmethod |
| | def format_section_content(section: Dict) -> str: |
| | """ |
| | Format a section's content with standardized spacing and structure. |
| | |
| | Args: |
| | section: Dictionary containing section information |
| | |
| | Returns: |
| | str: Formatted section content |
| | """ |
| | |
| | formatted = f"\n{'#' * section['level']} {section['title']}\n" |
| |
|
| | |
| | if section["content"]: |
| | formatted += f"\n{section['content'].strip()}\n" |
| |
|
| | |
| | if section["subsections"]: |
| | |
| | if section["content"]: |
| | formatted += "\n---\n" |
| |
|
| | |
| | for subsection in section["subsections"]: |
| | formatted += FileProcessor.format_section_content(subsection) |
| |
|
| | |
| | formatted += "\n" + "=" * 80 + "\n" |
| |
|
| | return formatted |
| |
|
| | @staticmethod |
| | def standardize_output(sections: List[Dict]) -> str: |
| | """ |
| | Convert structured sections into a standardized string format. |
| | |
| | Args: |
| | sections: List of section dictionaries |
| | |
| | Returns: |
| | str: Standardized string output |
| | """ |
| | output = [] |
| |
|
| | |
| | for section in sections: |
| | output.append(FileProcessor.format_section_content(section)) |
| |
|
| | |
| | return "\n".join(output) |
| |
|
    @classmethod
    async def process_file_input(
        cls, file_input: Union[str, Dict], base_dir: str = None
    ) -> Dict:
        """
        Process file input information and return the structured content.

        Args:
            file_input: File input information (JSON string, dict, or direct file path)
            base_dir: Optional base directory to use for creating paper directories (for sync support)

        Returns:
            Dict: The structured content with keys "paper_dir", "file_path",
            "sections" and "standardized_text"

        Raises:
            ValueError: If no readable file can be resolved from the input or
                any downstream step fails (every exception is re-raised as
                ValueError by the outer handler).
        """
        try:
            # A chat-style string may reference the paper as `path/to/file.md`;
            # promote such a backtick-quoted reference to a paper_path dict.
            if isinstance(file_input, str):
                import re  # NOTE(review): redundant — re is already imported at module level

                file_path_match = re.search(r"`([^`]+\.md)`", file_input)
                if file_path_match:
                    paper_path = file_path_match.group(1)
                    file_input = {"paper_path": paper_path}

            # Absolute directory expected to contain the paper.
            paper_dir = cls.extract_file_path(file_input)

            # With a base_dir, relocate the paper directory under
            # <base_dir>/papers/<name> (or use base_dir itself for the known
            # workspace roots) and ensure it exists on disk.
            if base_dir and paper_dir:
                if paper_dir.endswith(("deepcode_lab", "agent_folders")):
                    paper_dir = base_dir
                else:
                    paper_name = os.path.basename(paper_dir)
                    paper_dir = os.path.join(base_dir, "papers", paper_name)

                os.makedirs(paper_dir, exist_ok=True)

            if not paper_dir:
                raise ValueError("Could not determine paper directory path")

            # Resolve the concrete file to read. Each branch falls back to
            # scanning the (possibly reassigned) paper directory for a
            # markdown file when the referenced path does not exist.
            file_path = None
            if isinstance(file_input, str):
                # First interpretation: the string is strict JSON.
                try:
                    parsed_json = json.loads(file_input)
                    if isinstance(parsed_json, dict) and "paper_path" in parsed_json:
                        file_path = parsed_json.get("paper_path")

                        if file_path and not os.path.exists(file_path):
                            # NOTE: paper_dir is clobbered here with the
                            # missing file's parent before the fallback scan.
                            paper_dir = os.path.dirname(file_path)
                            if os.path.isdir(paper_dir):
                                file_path = cls.find_markdown_file(paper_dir)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {paper_dir}"
                                    )
                    else:
                        raise ValueError("Invalid JSON format: missing paper_path")
                except json.JSONDecodeError:
                    # Second interpretation: JSON embedded in surrounding text.
                    extracted_json = cls.extract_json_from_text(file_input)
                    if extracted_json and "paper_path" in extracted_json:
                        file_path = extracted_json.get("paper_path")

                        if file_path and not os.path.exists(file_path):
                            paper_dir = os.path.dirname(file_path)
                            if os.path.isdir(paper_dir):
                                file_path = cls.find_markdown_file(paper_dir)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {paper_dir}"
                                    )
                    else:
                        # Third interpretation: the string is a raw path
                        # (document suffix, existing file, or directory).
                        if file_input.endswith(
                            (".md", ".pdf", ".txt", ".docx", ".doc", ".html", ".htm")
                        ):
                            if os.path.exists(file_input):
                                file_path = file_input
                            else:
                                file_path = cls.find_markdown_file(paper_dir)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {paper_dir}"
                                    )
                        elif os.path.exists(file_input):
                            if os.path.isfile(file_input):
                                file_path = file_input
                            elif os.path.isdir(file_input):
                                file_path = cls.find_markdown_file(file_input)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {file_input}"
                                    )
                        else:
                            raise ValueError(f"Invalid input: {file_input}")
            else:
                # Dict input: take paper_path directly, same fallback scan.
                file_path = file_input.get("paper_path")

                if file_path and not os.path.exists(file_path):
                    paper_dir = os.path.dirname(file_path)
                    if os.path.isdir(paper_dir):
                        file_path = cls.find_markdown_file(paper_dir)
                        if not file_path:
                            raise ValueError(
                                f"No markdown file found in directory: {paper_dir}"
                            )

            if not file_path:
                raise ValueError("No valid file path found")

            # Read, structure, and render the markdown content.
            content = await cls.read_file_content(file_path)

            structured_content = cls.parse_markdown_sections(content)

            standardized_text = cls.standardize_output(structured_content)

            return {
                "paper_dir": paper_dir,
                "file_path": file_path,
                "sections": structured_content,
                "standardized_text": standardized_text,
            }

        except Exception as e:
            raise ValueError(f"Error processing file input: {str(e)}")
| |
|
| | @staticmethod |
| | def extract_json_from_text(text: str) -> Optional[Dict]: |
| | """ |
| | Extract JSON from text that may contain markdown code blocks or other content. |
| | |
| | Args: |
| | text: Text that may contain JSON |
| | |
| | Returns: |
| | Optional[Dict]: Extracted JSON as dictionary or None if not found |
| | """ |
| | import re |
| |
|
| | |
| | json_pattern = r"```json\s*(\{.*?\})\s*```" |
| | match = re.search(json_pattern, text, re.DOTALL) |
| | if match: |
| | try: |
| | return json.loads(match.group(1)) |
| | except json.JSONDecodeError: |
| | pass |
| |
|
| | |
| | json_pattern = r"(\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\})" |
| | matches = re.findall(json_pattern, text, re.DOTALL) |
| | for match in matches: |
| | try: |
| | parsed = json.loads(match) |
| | if isinstance(parsed, dict) and "paper_path" in parsed: |
| | return parsed |
| | except json.JSONDecodeError: |
| | continue |
| |
|
| | return None |
| |
|