|
|
|
|
|
""" |
|
|
scrape_gh_docs.py |
|
|
|
|
|
Purpose: |
|
|
- Given a list of GitHub repositories (owner/repo or full GitHub URLs), locate documentation and download it. |
|
|
- Prefer fetching only documentation folders (docs, doc, documentation) when possible. |
|
|
|
|
|
Key features: |
|
|
- URL normalization: accepts either owner/repo or full https://github.com/owner/repo URLs. |
|
|
- Strategies: |
|
|
1) Contents API: detect and download only the 'docs/' folder. |
|
|
   2) Sparse checkout (optional): set `prefer_sparse: true` to git sparse-checkout only docs folders (no REST usage).
|
|
   3) Zip fallback (optional): set `prefer_zip: true` to download a codeload zip (no REST usage) and extract only .md files.
|
|
   4) Org heuristics and search fallback via the GitHub API when no docs folder is found directly.
|
|
- Content selection: YAML `only_md: true` limits downloads/extractions to Markdown files.
|
|
- Central config: reads YAML from `scrape_gh_docs_config.yaml` next to the script to control inputs/outputs and strategies (see the example config below).
|
|
- Note: Repository metadata fetching and filtering (e.g., by age/language/topics) has been split |
|
|
into a separate pipeline step (see `data_collection_utils/fetch_gh_meta.py` and `clean/clean_meta.py`). |
|
|
- Quiet mode: YAML `quiet: true` switches logging to warnings and above so tqdm progress stays visible.
|
|
- No-fetch mode: `--no-fetch` (or YAML `no_fetch: true`) rebuilds the Parquet(s) from the existing outdir without any network calls. The per-file texts Parquet path can be overridden via YAML `texts_parquet`.
|
|
|
|
|
Typical usage: |
|
|
    uv run starting_data/scrape_gh_docs.py            # reads scrape_gh_docs_config.yaml next to the script; add --no-fetch to skip network calls
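
Example `scrape_gh_docs_config.yaml` (illustrative only; the keys mirror those read in main(),
the paths and values are made-up samples):

    input_parquet: repos.parquet        # one or more Parquet files with a 'link' column
    outdir: gh_docs
    md_failed: md-failed.txt
    token_file: .github_token
    prefer_sparse: true
    only_md: true
    lang_filter: en
    min_text_chars: 200
    scrape_workers: 8
    rebuild_workers: 8
    quiet: false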
|
|
|
|
|
Outputs: |
|
|
- Saves files under `<outdir>/<owner>__<repo>/...`. |
|
|
- Writes a per-file Parquet to `<outdir>/cleaned_texts_on_metadata_only.parquet` (override via YAML `texts_parquet`) and a DuckDB database at `<outdir>/gh_docs.duckdb` (override via `duckdb_path`).
|
|
- Appends repos with fewer than 10 .md files to `md-failed.txt` (the file is truncated at the start of each run).
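
Illustrative output layout (hypothetical repo; file names are examples only):

    <outdir>/
      gh_docs.duckdb
      cleaned_texts_on_metadata_only.parquet
      octocat__hello-world/
        README.md
        docs/
          index.md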
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import zipfile |
|
|
import shutil |
|
|
import re |
|
|
import argparse |
|
|
from urllib.parse import urlparse |
|
|
from pathlib import Path |
|
|
from tqdm import tqdm |
|
|
import concurrent.futures |
|
|
import threading |
|
|
from typing import Optional |
|
|
from typing import Dict, Any, List |
|
|
|
|
|
import pandas as pd |
|
|
import subprocess |
|
|
import yaml |
|
|
import duckdb |
|
|
import logging |
|
|
from datetime import datetime |
|
|
import langid |
|
|
|
|
|
|
|
|
from github_api_utils import ( |
|
|
download_file, |
|
|
download_readme_to, |
|
|
get_repo_info, |
|
|
get_contents, |
|
|
get_owner_type, |
|
|
get_org_repos, |
|
|
search_repos, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def write_text(path, data): |
|
|
path.parent.mkdir(parents=True, exist_ok=True) |
|
|
path.write_text(data, encoding="utf-8") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def ensure_github_token(token_file: Optional[str]) -> str: |
|
|
"""Ensure GITHUB_TOKEN is set, optionally reading it from token_file. |
|
|
Exits the program if the token cannot be obtained. |
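
    When the token is read from token_file it is also exported as the GITHUB_TOKEN
    environment variable so the downstream GitHub helpers can pick it up.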
|
|
""" |
|
|
token = os.getenv("GITHUB_TOKEN") |
|
|
if not token and token_file: |
|
|
p = Path(token_file) |
|
|
if p.exists(): |
|
|
tok = p.read_text(encoding="utf-8").strip() |
|
|
if tok: |
|
|
os.environ["GITHUB_TOKEN"] = tok |
|
|
token = tok |
|
|
else: |
|
|
logger.error(f"Token file is empty: {p}") |
|
|
else: |
|
|
logger.error(f"Token file not found at: {p}") |
|
|
if not token: |
|
|
where = f" (from file {token_file})" if token_file else "" |
|
|
logger.error( |
|
|
f"GITHUB_TOKEN is required. Set it in the environment or provide --token-file pointing to a file containing the token{where}." |
|
|
) |
|
|
sys.exit(2) |
|
|
return token |
|
|
|
|
|
|
|
|
def append_line_threadsafe( |
|
|
path: Path, text: str, lock: Optional[threading.Lock] = None |
|
|
): |
|
|
"""Append a single line to a file in a thread-safe way.""" |
|
|
path.parent.mkdir(parents=True, exist_ok=True) |
|
|
if lock: |
|
|
lock.acquire() |
|
|
try: |
|
|
with open(path, "a", encoding="utf-8") as f: |
|
|
f.write(text) |
|
|
finally: |
|
|
if lock: |
|
|
lock.release() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def count_md_files(root_dir: Path): |
|
|
return sum(1 for p in root_dir.rglob("*.md") if p.is_file()) |
|
|
|
|
|
|
|
|
def safe_name(s: str): |
|
|
return re.sub(r"[^A-Za-z0-9._-]+", "_", s) |
|
|
|
|
|
|
|
|
def copy_tree_only_md(src_dir: Path, dst_dir: Path): |
|
|
for p in src_dir.rglob("*.md"): |
|
|
if not p.is_file(): |
|
|
continue |
|
|
rel = p.relative_to(src_dir) |
|
|
target = dst_dir / rel |
|
|
target.parent.mkdir(parents=True, exist_ok=True) |
|
|
shutil.copy2(p, target) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def collect_md_rows_for_repo_dir( |
|
|
d: Path, |
|
|
outdir: Path, |
|
|
lang_filter_value: Optional[str], |
|
|
min_text_chars_value: int, |
|
|
updated_at: str, |
|
|
) -> List[Dict[str, Any]]: |
|
|
"""Scan a single <owner>__<repo> directory for Markdown files and build row dicts. |
|
|
|
|
|
Returns a list of rows with fields: owner, repo, repo_dir, file_rel_repo, |
|
|
file_rel_outdir, size, mtime, lang, content, updated_at. |
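
    Illustrative call (paths and values are hypothetical):

        rows = collect_md_rows_for_repo_dir(
            Path("output/octocat__hello-world"),
            Path("output"),
            lang_filter_value="en",
            min_text_chars_value=200,
            updated_at="2024-01-01T00:00:00",
        )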
|
|
""" |
|
|
try: |
|
|
owner, repo = d.name.split("__", 1) |
|
|
except ValueError: |
|
|
return [] |
|
|
rows: List[Dict[str, Any]] = [] |
|
|
|
|
|
md_files: List[Path] = list(d.rglob("*.md")) |
|
|
root_readmes: List[Path] = [ |
|
|
p for p in d.iterdir() if p.is_file() and p.name.lower().startswith("readme") |
|
|
] |
|
|
|
|
|
repo_lang: Optional[str] = None |
|
|
should_filter_lang = ( |
|
|
lang_filter_value is not None and str(lang_filter_value).strip() != "" |
|
|
) |
|
|
if should_filter_lang: |
|
|
src_for_lang: Optional[Path] = ( |
|
|
root_readmes[0] if root_readmes else (md_files[0] if md_files else None) |
|
|
) |
|
|
if src_for_lang is not None and src_for_lang.is_file(): |
|
|
text0 = src_for_lang.read_text(encoding="utf-8", errors="replace") |
|
|
|
|
|
lid_code, _ = langid.classify(text0) |
|
|
repo_lang = lid_code |
|
|
|
|
|
|
|
|
candidates: List[Path] = [] |
|
|
candidates.extend(md_files) |
|
|
candidates.extend(root_readmes) |
|
|
seen_paths = set() |
|
|
unique_candidates: List[Path] = [] |
|
|
for p in candidates: |
|
|
rp = p.resolve() |
|
|
if rp in seen_paths: |
|
|
continue |
|
|
seen_paths.add(rp) |
|
|
unique_candidates.append(p) |
|
|
|
|
|
for fpath in unique_candidates: |
|
|
if not fpath.is_file(): |
|
|
continue |
|
|
try: |
|
|
rel_repo = fpath.relative_to(d) |
|
|
except Exception: |
|
|
rel_repo = fpath.name |
|
|
text = fpath.read_text(encoding="utf-8", errors="replace") |
|
|
include = True |
|
|
|
|
|
is_readme = fpath.parent == d and fpath.name.lower().startswith("readme") |
|
|
        if not is_readme and should_filter_lang:
            include = repo_lang == lang_filter_value
        # Apply the configured minimum character count to non-README files.
        if include and not is_readme and len(text) < min_text_chars_value:
            include = False
|
|
if include: |
|
|
row = { |
|
|
"owner": owner, |
|
|
"repo": repo, |
|
|
"repo_dir": d.name, |
|
|
"file_rel_repo": str(rel_repo), |
|
|
"file_rel_outdir": str(fpath.relative_to(outdir)), |
|
|
"size": fpath.stat().st_size, |
|
|
"mtime": int(fpath.stat().st_mtime), |
|
|
"lang": repo_lang, |
|
|
"content": text, |
|
|
"updated_at": updated_at, |
|
|
} |
|
|
rows.append(row) |
|
|
return rows |
|
|
|
|
|
|
|
|
def compute_md_failed_for_existing(outdir: Path, md_failed_path: Path): |
|
|
"""Scan existing <owner>__<repo> directories and append low-md-count entries to md-failed. |
|
|
|
|
|
This mirrors the docs folder detection used in process_repo_entry(). |
|
|
""" |
|
|
repo_dirs = [ |
|
|
d |
|
|
for d in outdir.iterdir() |
|
|
if d.is_dir() and "__" in d.name and not d.name.startswith("tmp_") |
|
|
] |
|
|
with tqdm(total=len(repo_dirs), desc="Existing repos") as pbar: |
|
|
for d in repo_dirs: |
|
|
try: |
|
|
owner, repo = d.name.split("__", 1) |
|
|
except ValueError: |
|
|
pbar.update(1) |
|
|
continue |
|
|
|
|
|
if (d / "docs").exists(): |
|
|
docs_folder = d / "docs" |
|
|
else: |
|
|
found = None |
|
|
for p in d.rglob("docs"): |
|
|
if p.is_dir(): |
|
|
found = p |
|
|
break |
|
|
docs_folder = found if found else d |
|
|
md_count = count_md_files(docs_folder) |
|
|
logger.info( |
|
|
f"[no-fetch] {owner}/{repo}: {md_count} .md files in '{docs_folder.relative_to(outdir)}'" |
|
|
) |
|
|
if md_count < 10: |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{owner}/{repo} # md-count={md_count}\n" |
|
|
) |
|
|
pbar.update(1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def download_docs_folder( |
|
|
owner, repo, docs_path, default_branch, outdir, only_md: bool = False |
|
|
): |
|
|
""" |
|
|
docs_path: path to folder in repo, e.g., 'docs' or 'website/docs' |
|
|
Will download all files under that folder recursively using the contents API. |
|
|
""" |
|
|
|
|
|
contents = get_contents(owner, repo, docs_path, ref=default_branch) |
|
|
if not contents: |
|
|
return False |
|
|
if isinstance(contents, dict) and contents.get("type") == "file": |
|
|
|
|
|
dest_root = outdir / safe_name(f"{owner}__{repo}") / docs_path |
|
|
dest_root.parent.mkdir(parents=True, exist_ok=True) |
|
|
download_file(contents["download_url"], dest_root) |
|
|
return True |
|
|
if isinstance(contents, list): |
|
|
for item in contents: |
|
|
if item["type"] == "dir": |
|
|
|
|
|
subpath = item["path"] |
|
|
download_folder_via_api( |
|
|
owner, |
|
|
repo, |
|
|
subpath, |
|
|
default_branch, |
|
|
outdir / safe_name(f"{owner}__{repo}"), |
|
|
only_md=only_md, |
|
|
) |
|
|
elif item["type"] == "file": |
|
|
if (not only_md) or item["name"].lower().endswith(".md"): |
|
|
rel = Path(item["path"]) |
|
|
dest = outdir / safe_name(f"{owner}__{repo}") / rel |
|
|
download_file(item["download_url"], dest) |
|
|
return True |
|
|
return False |
|
|
|
|
|
|
|
|
def download_folder_via_api( |
|
|
owner, repo, path, ref, dest_root: Path, only_md: bool = False |
|
|
): |
|
|
""" |
|
|
Recursively download a path via contents API and save to dest_root/path |
|
|
""" |
|
|
contents = get_contents(owner, repo, path, ref=ref) |
|
|
if not contents: |
|
|
return |
|
|
if isinstance(contents, dict) and contents.get("type") == "file": |
|
|
if (not only_md) or contents.get("name", "").lower().endswith(".md"): |
|
|
dest = dest_root / contents["path"] |
|
|
dest.parent.mkdir(parents=True, exist_ok=True) |
|
|
download_file(contents["download_url"], dest) |
|
|
return |
|
|
if isinstance(contents, list): |
|
|
for item in contents: |
|
|
if item["type"] == "dir": |
|
|
download_folder_via_api( |
|
|
owner, repo, item["path"], ref, dest_root, only_md=only_md |
|
|
) |
|
|
elif item["type"] == "file": |
|
|
if (not only_md) or item["name"].lower().endswith(".md"): |
|
|
dest = dest_root / item["path"] |
|
|
dest.parent.mkdir(parents=True, exist_ok=True) |
|
|
download_file(item["download_url"], dest) |
|
|
|
|
|
|
|
|
def download_repo_zip(owner, repo, ref, outdir, only_md: bool = False): |
|
|
""" |
|
|
Download zipball and extract into outdir/{owner}__{repo}/ |
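
    When only_md is True, only *.md entries from the archive are extracted; otherwise
    the whole zip is extracted. The single top-level folder GitHub puts inside the
    zipball is flattened into the destination afterwards.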
|
|
""" |
|
|
|
|
|
ref = ref or "main" |
|
|
url = f"https://codeload.github.com/{owner}/{repo}/zip/refs/heads/{ref}" |
|
|
tmpzip = outdir / safe_name(f"tmp_{owner}_{repo}.zip") |
|
|
download_file(url, tmpzip) |
|
|
extract_to = outdir / safe_name(f"{owner}__{repo}") |
|
|
extract_to.mkdir(parents=True, exist_ok=True) |
|
|
with zipfile.ZipFile(tmpzip, "r") as z: |
|
|
if only_md: |
|
|
|
|
|
top_prefix = None |
|
|
for info in z.infolist(): |
|
|
if info.is_dir(): |
|
|
continue |
|
|
name = info.filename |
|
|
if top_prefix is None: |
|
|
top_prefix = name.split("/", 1)[0] + "/" |
|
|
rel = name[len(top_prefix) :] if name.startswith(top_prefix) else name |
|
|
if not rel.lower().endswith(".md"): |
|
|
continue |
|
|
dest = extract_to / rel |
|
|
dest.parent.mkdir(parents=True, exist_ok=True) |
|
|
with z.open(info) as src, open(dest, "wb") as dst: |
|
|
shutil.copyfileobj(src, dst) |
|
|
else: |
|
|
z.extractall(extract_to) |
|
|
|
|
|
entries = list(extract_to.iterdir()) |
|
|
if len(entries) == 1 and entries[0].is_dir(): |
|
|
inner = entries[0] |
|
|
for item in inner.iterdir(): |
|
|
shutil.move(str(item), str(extract_to)) |
|
|
inner.rmdir() |
|
|
tmpzip.unlink(missing_ok=True) |
|
|
return extract_to |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def sparse_checkout_docs( |
|
|
owner: str, |
|
|
repo: str, |
|
|
branch: str, |
|
|
outdir: Path, |
|
|
patterns: Optional[List[str]] = None, |
|
|
only_md: bool = False, |
|
|
) -> bool: |
|
|
"""Use git sparse-checkout to fetch only documentation folders for a given branch. |
|
|
patterns: list of sparse-checkout patterns. Defaults to ['docs', 'doc', 'documentation', '**/docs', '**/doc', '**/documentation']. |
|
|
Returns True if at least one documentation folder was found and copied into outdir/<owner__repo>/... |
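
    Roughly equivalent to running (illustrative):

        git clone --depth 1 --filter=blob:none --sparse -b <branch> https://github.com/<owner>/<repo>.git <tmp>
        git -C <tmp> sparse-checkout init --no-cone
        git -C <tmp> sparse-checkout set docs doc documentation '**/docs' '**/doc' '**/documentation'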
|
|
""" |
|
|
repo_url = f"https://github.com/{owner}/{repo}.git" |
|
|
dest_root = outdir / safe_name(f"{owner}__{repo}") |
|
|
tmp_dir = outdir / safe_name(f"tmp_sparse_{owner}_{repo}_{branch}") |
|
|
if patterns is None: |
|
|
patterns = [ |
|
|
"docs", |
|
|
"doc", |
|
|
"documentation", |
|
|
"**/docs", |
|
|
"**/doc", |
|
|
"**/documentation", |
|
|
] |
|
|
|
|
|
|
|
|
if tmp_dir.exists(): |
|
|
shutil.rmtree(tmp_dir, ignore_errors=True) |
|
|
|
|
|
try: |
|
|
|
|
|
subprocess.run( |
|
|
[ |
|
|
"git", |
|
|
"clone", |
|
|
"--depth", |
|
|
"1", |
|
|
"--filter=blob:none", |
|
|
"--sparse", |
|
|
"-b", |
|
|
branch, |
|
|
repo_url, |
|
|
str(tmp_dir), |
|
|
], |
|
|
check=True, |
|
|
stdout=subprocess.PIPE, |
|
|
stderr=subprocess.PIPE, |
|
|
) |
|
|
|
|
|
|
|
|
subprocess.run( |
|
|
["git", "-C", str(tmp_dir), "sparse-checkout", "init", "--no-cone"], |
|
|
check=True, |
|
|
stdout=subprocess.PIPE, |
|
|
stderr=subprocess.PIPE, |
|
|
) |
|
|
subprocess.run( |
|
|
[ |
|
|
"git", |
|
|
"-C", |
|
|
str(tmp_dir), |
|
|
"sparse-checkout", |
|
|
"set", |
|
|
*patterns, |
|
|
], |
|
|
check=True, |
|
|
stdout=subprocess.PIPE, |
|
|
stderr=subprocess.PIPE, |
|
|
) |
|
|
|
|
|
|
|
|
matched_dirs: List[Path] = [] |
|
|
for pat in ["docs", "doc", "documentation"]: |
|
|
for p in tmp_dir.rglob(pat): |
|
|
if p.is_dir(): |
|
|
matched_dirs.append(p) |
|
|
|
|
|
if not matched_dirs: |
|
|
return False |
|
|
|
|
|
|
|
|
for d in matched_dirs: |
|
|
rel = d.relative_to(tmp_dir) |
|
|
target_dir = dest_root / rel |
|
|
target_dir.parent.mkdir(parents=True, exist_ok=True) |
|
|
if target_dir.exists(): |
|
|
shutil.rmtree(target_dir) |
|
|
            if only_md:
                copy_tree_only_md(d, target_dir)
            else:
                # Copy the whole docs folder when Markdown-only mode is not requested.
                shutil.copytree(d, target_dir)
|
|
return True |
|
|
finally: |
|
|
|
|
|
if tmp_dir.exists(): |
|
|
shutil.rmtree(tmp_dir, ignore_errors=True) |
|
|
|
|
|
|
|
|
|
|
|
DOC_NAME_PATTERNS = [ |
|
|
re.compile(r"^docs?$", re.I), |
|
|
re.compile(r"^documentation$", re.I), |
|
|
re.compile(r"^doc$", re.I), |
|
|
re.compile(r".*[-_.]docs?$", re.I), |
|
|
re.compile(r"^docs[-_].*", re.I), |
|
|
re.compile(r".*doc.*", re.I), |
|
|
] |
|
|
|
|
|
|
|
|
def find_docs_repo_in_org(org_repos, project_repo_name=None): |
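    """Pick the most likely documentation repository from an owner's repo list.

    Candidates are repos whose name matches DOC_NAME_PATTERNS; when project_repo_name
    is given, names containing it (e.g. '<project>-docs') are preferred. Returns the
    chosen repo dict, or None if there is no plausible candidate.
    """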
|
|
|
|
|
candidates = [] |
|
|
for r in org_repos: |
|
|
name = r.get("name", "") |
|
|
for p in DOC_NAME_PATTERNS: |
|
|
            if p.match(name):  # match() already covers the anchored full-name patterns
|
|
candidates.append(r) |
|
|
break |
|
|
|
|
|
if project_repo_name: |
|
|
project_lower = project_repo_name.lower() |
|
|
scored = [] |
|
|
for r in candidates: |
|
|
name = r.get("name", "").lower() |
|
|
score = 0 |
|
|
if project_lower in name: |
|
|
score += 10 |
|
|
if name.startswith("docs") or name.endswith("docs"): |
|
|
score += 1 |
|
|
scored.append((score, r)) |
|
|
scored.sort(reverse=True, key=lambda x: x[0]) |
|
|
if scored: |
|
|
return scored[0][1] |
|
|
if candidates: |
|
|
return candidates[0] |
|
|
return None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _parse_owner_repo(line: str) -> Optional[tuple[str, str]]: |
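    """Parse 'owner/repo' or a https://github.com/owner/repo URL into (owner, repo).

    Returns None for blank lines, non-GitHub URLs, and strings without a '/'.
    A trailing '.git' is stripped from URL inputs.
    """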
|
|
s = line.strip() |
|
|
if not s: |
|
|
return None |
|
|
if s.startswith("http://") or s.startswith("https://"): |
|
|
p = urlparse(s) |
|
|
if p.netloc != "github.com": |
|
|
return None |
|
|
parts = [part for part in p.path.split("/") if part] |
|
|
if len(parts) < 2: |
|
|
return None |
|
|
owner, repo = parts[0], parts[1] |
|
|
if repo.endswith(".git"): |
|
|
repo = repo[:-4] |
|
|
return owner, repo |
|
|
|
|
|
if "/" in s: |
|
|
owner, repo = s.split("/", 1) |
|
|
return owner, repo |
|
|
return None |
|
|
|
|
|
|
|
|
def process_repo_entry( |
|
|
owner_repo, |
|
|
outdir: Path, |
|
|
md_failed_path: Path, |
|
|
dry_run=False, |
|
|
lock: Optional[threading.Lock] = None, |
|
|
prefer_zip: bool = False, |
|
|
prefer_sparse: bool = False, |
|
|
only_md: bool = False, |
|
|
resume: bool = True, |
|
|
): |
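    """Fetch documentation for a single owner/repo entry and return a summary dict.

    Tries, in order: reuse of an existing <owner>__<repo> directory (when resume is
    True), git sparse-checkout (prefer_sparse), a codeload zip download (prefer_zip),
    the Contents API for 'docs/', and org/search heuristics as a last resort. Entries
    that end up with fewer than 10 Markdown files are appended to md_failed_path and
    flagged as 'low-md-count'.
    """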
|
|
owner_repo = owner_repo.strip() |
|
|
if not owner_repo or owner_repo.startswith("#"): |
|
|
return None |
|
|
parsed = _parse_owner_repo(owner_repo) |
|
|
if not parsed: |
|
|
logger.warning( |
|
|
f"invalid line (expected owner/repo or GitHub URL): '{owner_repo}'" |
|
|
) |
|
|
return None |
|
|
owner, repo = parsed |
|
|
logger.info(f"Processing {owner}/{repo}") |
|
|
|
|
|
result: Dict[str, Any] = { |
|
|
"owner": owner, |
|
|
"repo": repo, |
|
|
"default_branch": None, |
|
|
"method": None, |
|
|
"docs_found": False, |
|
|
"docs_folder": None, |
|
|
"md_count": None, |
|
|
"status": "ok", |
|
|
"note": None, |
|
|
"docs_found_in": None, |
|
|
"readme_found": False, |
|
|
"readme_filename": None, |
|
|
} |
|
|
got_any = False |
|
|
default_branch = None |
|
|
|
|
|
|
|
|
repo_saved_root = outdir / safe_name(f"{owner}__{repo}") |
|
|
if resume and repo_saved_root.exists(): |
|
|
|
|
|
if (repo_saved_root / "docs").exists(): |
|
|
docs_folder = repo_saved_root / "docs" |
|
|
else: |
|
|
found = None |
|
|
for p in repo_saved_root.rglob("docs"): |
|
|
if p.is_dir(): |
|
|
found = p |
|
|
break |
|
|
docs_folder = found if found else repo_saved_root |
|
|
|
|
|
try: |
|
|
info = download_readme_to(owner, repo, repo_saved_root, ref=None) |
|
|
if info: |
|
|
result["readme_found"] = True |
|
|
result["readme_filename"] = info.get("name") |
|
|
except Exception: |
|
|
pass |
|
|
md_count = count_md_files(docs_folder) |
|
|
result["default_branch"] = None |
|
|
result["method"] = "resume-existing" |
|
|
result["docs_found_in"] = None |
|
|
result["docs_found"] = True |
|
|
assert docs_folder.is_relative_to(outdir) |
|
|
result["docs_folder"] = str(docs_folder.relative_to(outdir)) |
|
|
result["md_count"] = int(md_count) |
|
|
if md_count < 10: |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{owner}/{repo} # md-count={md_count}\n", lock |
|
|
) |
|
|
result["status"] = "low-md-count" |
|
|
return result |
|
|
|
|
|
if prefer_sparse: |
|
|
|
|
|
for branch_guess in ("main", "master"): |
|
|
try: |
|
|
logger.info( |
|
|
f"prefer_sparse: attempting docs/ for {owner}/{repo}@{branch_guess} via git sparse-checkout" |
|
|
) |
|
|
if not dry_run: |
|
|
ok = sparse_checkout_docs( |
|
|
owner, repo, branch_guess, outdir, only_md=only_md |
|
|
) |
|
|
else: |
|
|
ok = True |
|
|
if ok: |
|
|
default_branch = branch_guess |
|
|
result["default_branch"] = branch_guess |
|
|
result["method"] = "sparse_docs" |
|
|
result["docs_found_in"] = ( |
|
|
f"https://github.com/{owner}/{repo}/tree/{branch_guess}/docs" |
|
|
) |
|
|
got_any = True |
|
|
break |
|
|
except FileNotFoundError: |
|
|
logger.error("'git' not found. Install git to use --prefer-sparse.") |
|
|
break |
|
|
except subprocess.CalledProcessError as e: |
|
|
logger.warning( |
|
|
f"prefer_sparse: git error for {owner}/{repo}@{branch_guess}: {e}" |
|
|
) |
|
|
if not got_any: |
|
|
logger.info( |
|
|
f"prefer_sparse: docs/ not found via sparse-checkout for {owner}/{repo}. Continuing with other strategies." |
|
|
) |
|
|
|
|
|
if prefer_zip and not got_any: |
|
|
|
|
|
for branch_guess in ("main", "master"): |
|
|
try: |
|
|
logger.info( |
|
|
f"prefer_zip: attempting {owner}/{repo}@{branch_guess} via codeload" |
|
|
) |
|
|
if not dry_run: |
|
|
download_repo_zip( |
|
|
owner, repo, branch_guess, outdir, only_md=only_md |
|
|
) |
|
|
default_branch = branch_guess |
|
|
result["default_branch"] = branch_guess |
|
|
result["method"] = "zip_whole_repo" |
|
|
result["docs_found_in"] = f"https://github.com/{owner}/{repo}" |
|
|
got_any = True |
|
|
break |
|
|
except Exception as e: |
|
|
logger.warning( |
|
|
f"prefer_zip: failed to download {owner}/{repo}@{branch_guess}: {e}" |
|
|
) |
|
|
if not got_any: |
|
|
logger.warning( |
|
|
f"prefer_zip failed for {owner}/{repo} (both main and master). Marking as docs-not-found." |
|
|
) |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{owner}/{repo} # zip-failed\n", lock |
|
|
) |
|
|
result["status"] = "docs-not-found" |
|
|
return result |
|
|
if not got_any: |
|
|
|
|
|
repo_json = get_repo_info(owner, repo) |
|
|
if not repo_json: |
|
|
logger.error(f"Cannot fetch repo metadata for {owner}/{repo}") |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{owner}/{repo} # metadata-failed\n", lock |
|
|
) |
|
|
return { |
|
|
"owner": owner, |
|
|
"repo": repo, |
|
|
"default_branch": None, |
|
|
"method": None, |
|
|
"docs_found": False, |
|
|
"docs_folder": None, |
|
|
"md_count": None, |
|
|
"status": "metadata-failed", |
|
|
"note": "failed to fetch repo metadata", |
|
|
} |
|
|
default_branch = repo_json.get("default_branch", "main") |
|
|
result["default_branch"] = default_branch |
|
|
|
|
|
|
|
|
docs_path = "docs" |
|
|
logger.info( |
|
|
f"Checking for '{docs_path}/' in {owner}/{repo} (branch {default_branch})" |
|
|
) |
|
|
contents = ( |
|
|
None |
|
|
if (prefer_zip or prefer_sparse) |
|
|
else get_contents(owner, repo, docs_path, ref=default_branch) |
|
|
) |
|
|
saved_root = outdir / safe_name(f"{owner}__{repo}") |
|
|
if contents: |
|
|
if isinstance(contents, list): |
|
|
logger.info(f"Found docs/ directory in {owner}/{repo}. Downloading...") |
|
|
if not dry_run: |
|
|
download_folder_via_api( |
|
|
owner, repo, docs_path, default_branch, saved_root, only_md=only_md |
|
|
) |
|
|
got_any = True |
|
|
result["method"] = "docs_folder_in_repo" |
|
|
result["docs_found_in"] = ( |
|
|
f"https://github.com/{owner}/{repo}/tree/{default_branch}/{docs_path}" |
|
|
) |
|
|
elif isinstance(contents, dict) and contents.get("type") == "file": |
|
|
logger.info("Found file at docs (single-file). Downloading...") |
|
|
if not dry_run: |
|
|
download_folder_via_api( |
|
|
owner, repo, docs_path, default_branch, saved_root, only_md=only_md |
|
|
) |
|
|
got_any = True |
|
|
result["method"] = "docs_file_in_repo" |
|
|
result["docs_found_in"] = ( |
|
|
f"https://github.com/{owner}/{repo}/blob/{default_branch}/{docs_path}" |
|
|
) |
|
|
else: |
|
|
|
|
|
pass |
|
|
else: |
|
|
|
|
|
pass |
|
|
|
|
|
if not got_any: |
|
|
|
|
|
org_repos = get_org_repos(owner) |
|
|
candidate = find_docs_repo_in_org(org_repos, project_repo_name=repo) |
|
|
if candidate: |
|
|
cand_name = candidate["name"] |
|
|
logger.info( |
|
|
f"Found candidate docs repo in org: {owner}/{cand_name} -> will download repo zip" |
|
|
) |
|
|
if not dry_run: |
|
|
download_repo_zip( |
|
|
owner, cand_name, candidate.get("default_branch", "main"), outdir |
|
|
) |
|
|
got_any = True |
|
|
result["method"] = "org_docs_repo_zip" |
|
|
result["docs_found_in"] = f"https://github.com/{owner}/{cand_name}" |
|
|
else: |
|
|
|
|
|
owner_type = get_owner_type(owner) |
|
|
scope = f"org:{owner}" if owner_type == "Organization" else f"user:{owner}" |
|
|
q = f"{repo} docs in:name {scope}" |
|
|
logger.info(f"Trying fallback search: '{q}'") |
|
|
items = search_repos(q, per_page=5) |
|
|
if items: |
|
|
first = items[0] |
|
|
logger.info( |
|
|
f"Fallback search found {first['full_name']}. Downloading repo zip" |
|
|
) |
|
|
if not dry_run: |
|
|
download_repo_zip( |
|
|
first["owner"]["login"], |
|
|
first["name"], |
|
|
first.get("default_branch", "main"), |
|
|
outdir, |
|
|
) |
|
|
got_any = True |
|
|
result["method"] = "search_repo_zip" |
|
|
result["docs_found_in"] = first.get( |
|
|
"html_url", |
|
|
f"https://github.com/{first['owner']['login']}/{first['name']}", |
|
|
) |
|
|
|
|
|
if not got_any: |
|
|
logger.warning( |
|
|
f"No docs found for {owner}/{repo} after heuristics. Marking as md-failed." |
|
|
) |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{owner}/{repo} # docs-not-found\n", lock |
|
|
) |
|
|
result["status"] = "docs-not-found" |
|
|
result["docs_found"] = False |
|
|
return result |
|
|
|
|
|
|
|
|
|
|
|
repo_saved_root = outdir / safe_name(f"{owner}__{repo}") |
|
|
|
|
|
if (repo_saved_root / "docs").exists(): |
|
|
docs_folder = repo_saved_root / "docs" |
|
|
else: |
|
|
|
|
|
found = None |
|
|
for p in repo_saved_root.rglob("docs"): |
|
|
if p.is_dir(): |
|
|
found = p |
|
|
break |
|
|
docs_folder = found if found else repo_saved_root |
|
|
|
|
|
try: |
|
|
info = download_readme_to(owner, repo, repo_saved_root, ref=default_branch) |
|
|
if info: |
|
|
result["readme_found"] = True |
|
|
result["readme_filename"] = info.get("name") |
|
|
except Exception: |
|
|
pass |
|
|
|
|
|
md_count = count_md_files(docs_folder) |
|
|
logger.info( |
|
|
f"Found {md_count} .md files for {owner}/{repo} in '{docs_folder.relative_to(outdir)}'" |
|
|
) |
|
|
result["docs_found"] = True |
|
|
assert docs_folder.is_relative_to(outdir) |
|
|
result["docs_folder"] = str(docs_folder.relative_to(outdir)) |
|
|
result["md_count"] = int(md_count) |
|
|
if md_count < 10: |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{owner}/{repo} # md-count={md_count}\n", lock |
|
|
) |
|
|
result["status"] = "low-md-count" |
|
|
|
|
|
return result |
|
|
|
|
|
|
|
|
def _init_duckdb(con): |
|
|
con.execute( |
|
|
""" |
|
|
CREATE TABLE IF NOT EXISTS texts ( |
|
|
owner TEXT, |
|
|
repo TEXT, |
|
|
repo_dir TEXT, |
|
|
file_rel_repo TEXT, |
|
|
file_rel_outdir TEXT, |
|
|
size BIGINT, |
|
|
mtime BIGINT, |
|
|
lang TEXT, |
|
|
content TEXT, |
|
|
updated_at TEXT |
|
|
); |
|
|
""" |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main(): |
|
|
parser = argparse.ArgumentParser( |
|
|
description="Scrape docs/ folders for a list of GitHub repos (configuration via YAML only)." |
|
|
) |
|
|
parser.add_argument( |
|
|
"--no-fetch", |
|
|
action="store_true", |
|
|
help="Do not perform any network downloads; only scan existing outdir to build Parquet and md-failed", |
|
|
) |
|
|
args = parser.parse_args() |
|
|
|
|
|
|
|
|
cfg: Dict[str, Any] = {} |
|
|
cfg_path = Path(__file__).with_name("scrape_gh_docs_config.yaml") |
|
|
if cfg_path.exists(): |
|
|
cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {} |
|
|
|
|
|
|
|
|
|
|
|
def _resolve_cfg_path(val): |
|
|
if val is None: |
|
|
return None |
|
|
p = Path(val) |
|
|
if not p.is_absolute() and cfg_path is not None: |
|
|
p = (cfg_path.parent / p).resolve() |
|
|
return str(p) |
|
|
|
|
|
outdir_value = _resolve_cfg_path(cfg.get("outdir", "output")) |
|
|
md_failed_value = _resolve_cfg_path(cfg.get("md_failed", "md-failed.txt")) |
|
|
dry_run_value = bool(cfg.get("dry_run", False)) |
|
|
|
|
|
|
|
|
legacy_workers = cfg.get("workers") |
|
|
scrape_workers_value = int( |
|
|
cfg.get("scrape_workers", legacy_workers if legacy_workers is not None else 4) |
|
|
) |
|
|
rebuild_workers_value = int( |
|
|
cfg.get("rebuild_workers", legacy_workers if legacy_workers is not None else 4) |
|
|
) |
|
|
texts_parquet_value = _resolve_cfg_path(cfg.get("texts_parquet")) |
|
|
duckdb_path_value = _resolve_cfg_path(cfg.get("duckdb_path")) |
|
|
token_file_value = _resolve_cfg_path(cfg.get("token_file")) |
|
|
prefer_zip_value = bool(cfg.get("prefer_zip", False)) |
|
|
prefer_sparse_value = bool(cfg.get("prefer_sparse", False)) |
|
|
only_md_value = bool(cfg.get("only_md", False)) |
|
|
resume_value = bool(cfg.get("resume", True)) |
|
|
quiet_value = bool(cfg.get("quiet", False)) |
|
|
|
|
|
no_fetch_value = bool(args.no_fetch or cfg.get("no_fetch", False)) |
|
|
lang_filter_value = cfg.get("lang_filter", "en") |
|
|
min_text_chars_value = int(cfg.get("min_text_chars", 200)) |
|
|
postprocess_executor_value = str(cfg.get("postprocess_executor", "thread")).lower() |
|
|
|
|
|
def _resolve_cfg_paths(val): |
|
|
if val is None: |
|
|
return [] |
|
|
if isinstance(val, (list, tuple)): |
|
|
return [_resolve_cfg_path(v) for v in val if v is not None] |
|
|
return [_resolve_cfg_path(val)] |
|
|
|
|
|
input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet")) |
|
|
|
|
|
|
|
|
setup_logging(quiet=quiet_value) |
|
|
|
|
|
|
|
|
if not no_fetch_value: |
|
|
ensure_github_token(token_file_value) |
|
|
|
|
|
outdir = Path(outdir_value) |
|
|
outdir.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
|
|
|
duckdb_path = ( |
|
|
Path(duckdb_path_value) if duckdb_path_value else (outdir / "gh_docs.duckdb") |
|
|
) |
|
|
con = duckdb.connect(str(duckdb_path)) |
|
|
_init_duckdb(con) |
|
|
logger.info(f"Opened DuckDB at '{duckdb_path}'") |
|
|
|
|
|
md_failed_path = Path(md_failed_value) |
|
|
|
|
|
md_failed_path.write_text("") |
|
|
|
|
|
if no_fetch_value: |
|
|
lines: List[str] = [] |
|
|
else: |
|
|
lines: List[str] = [] |
|
|
if not input_parquet_values: |
|
|
logger.error( |
|
|
"'input_parquet' is required. Configure one or more Parquet files with a 'link' column in scrape_gh_docs_config.yaml." |
|
|
) |
|
|
sys.exit(2) |
|
|
|
|
|
|
|
|
seen_canonical = set() |
|
|
seen_raw_invalid = set() |
|
|
for pth in input_parquet_values: |
|
|
df = pd.read_parquet(pth) |
|
|
assert "link" in df.columns, f"Parquet {pth} must contain 'link' column" |
|
|
for u in df["link"].tolist(): |
|
|
s = str(u).strip() |
|
|
if not s: |
|
|
continue |
|
|
parsed = _parse_owner_repo(s) |
|
|
if parsed: |
|
|
owner, repo = parsed |
|
|
key = f"{owner.lower()}/{repo.lower()}" |
|
|
if key in seen_canonical: |
|
|
continue |
|
|
seen_canonical.add(key) |
|
|
|
|
|
lines.append(f"{owner}/{repo}") |
|
|
else: |
|
|
|
|
|
if s in seen_raw_invalid: |
|
|
continue |
|
|
seen_raw_invalid.add(s) |
|
|
lines.append(s) |
|
|
|
|
|
logger.info( |
|
|
f"Will process {len(lines)} repo entries (fetch={'off' if no_fetch_value else 'on'}), scrape_workers={scrape_workers_value}, rebuild_workers={rebuild_workers_value}" |
|
|
) |
|
|
|
|
|
md_failed_lock = threading.Lock() |
|
|
results_lock = threading.Lock() |
|
|
results: List[Dict[str, Any]] = [] |
|
|
duckdb_lock = threading.Lock() |
|
|
|
|
|
|
|
|
run_ts = datetime.utcnow().isoformat() |
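
    # Scrape phase: process repo entries concurrently; each repo's Markdown rows are
    # inserted into DuckDB as soon as that repo finishes.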
|
|
with tqdm(total=len(lines), desc="Repos") as pbar: |
|
|
|
|
|
def _run(lr: str): |
|
|
try: |
|
|
res = process_repo_entry( |
|
|
lr, |
|
|
outdir, |
|
|
md_failed_path, |
|
|
dry_run=dry_run_value, |
|
|
lock=md_failed_lock, |
|
|
prefer_zip=prefer_zip_value, |
|
|
prefer_sparse=prefer_sparse_value, |
|
|
only_md=only_md_value, |
|
|
resume=resume_value, |
|
|
) |
|
|
if res is not None: |
|
|
with results_lock: |
|
|
results.append(res) |
|
|
|
|
|
owner = res.get("owner") |
|
|
repo = res.get("repo") |
|
|
if owner and repo and res.get("docs_found"): |
|
|
d = outdir / safe_name(f"{owner}__{repo}") |
|
|
if d.exists() and d.is_dir(): |
|
|
rows_one = collect_md_rows_for_repo_dir( |
|
|
d, |
|
|
outdir, |
|
|
lang_filter_value, |
|
|
min_text_chars_value, |
|
|
run_ts, |
|
|
) |
|
|
if rows_one: |
|
|
cols = [ |
|
|
"owner", |
|
|
"repo", |
|
|
"repo_dir", |
|
|
"file_rel_repo", |
|
|
"file_rel_outdir", |
|
|
"size", |
|
|
"mtime", |
|
|
"lang", |
|
|
"content", |
|
|
"updated_at", |
|
|
] |
|
|
df_one = pd.DataFrame(rows_one, columns=cols) |
|
|
with duckdb_lock: |
|
|
con.execute("BEGIN") |
|
|
con.execute( |
|
|
"DELETE FROM texts WHERE owner = ? AND repo = ?", |
|
|
[owner, repo], |
|
|
) |
|
|
con.register("df_txt_one", df_one) |
|
|
con.execute( |
|
|
"INSERT INTO texts SELECT * FROM df_txt_one" |
|
|
) |
|
|
con.unregister("df_txt_one") |
|
|
con.execute("COMMIT") |
|
|
logger.info( |
|
|
f"[incremental] {owner}/{repo}: inserted {len(rows_one)} rows into DuckDB" |
|
|
) |
|
|
except Exception as e: |
|
|
logger.error(f"Exception while processing {lr}: {e}") |
|
|
append_line_threadsafe( |
|
|
md_failed_path, f"{lr} # exception\n", md_failed_lock |
|
|
) |
|
|
with results_lock: |
|
|
results.append( |
|
|
{ |
|
|
"owner": lr.split("/", 1)[0] if "/" in lr else None, |
|
|
"repo": lr.split("/", 1)[1] if "/" in lr else lr, |
|
|
"default_branch": None, |
|
|
"method": None, |
|
|
"docs_found": False, |
|
|
"docs_folder": None, |
|
|
"md_count": None, |
|
|
"status": "exception", |
|
|
"note": str(e), |
|
|
} |
|
|
) |
|
|
|
|
|
with concurrent.futures.ThreadPoolExecutor( |
|
|
max_workers=scrape_workers_value |
|
|
) as executor: |
|
|
futures = [executor.submit(_run, lr) for lr in lines] |
|
|
for _ in concurrent.futures.as_completed(futures): |
|
|
pbar.update(1) |
|
|
|
|
|
|
|
|
if no_fetch_value: |
|
|
compute_md_failed_for_existing(outdir, md_failed_path) |
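
    # Rebuild phase: rescan every <owner>__<repo> directory on disk, rebuild the
    # DuckDB 'texts' table from scratch, and export it to Parquet at the end.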
|
|
|
|
|
|
|
|
repo_dirs = [ |
|
|
d |
|
|
for d in outdir.iterdir() |
|
|
if d.is_dir() and "__" in d.name and not d.name.startswith("tmp_") |
|
|
] |
|
|
logger.info( |
|
|
f"Starting post-processing scan of {len(repo_dirs)} repos under '{outdir}' to rebuild per-file dataset" |
|
|
) |
|
|
|
|
|
cols = [ |
|
|
"owner", |
|
|
"repo", |
|
|
"repo_dir", |
|
|
"file_rel_repo", |
|
|
"file_rel_outdir", |
|
|
"size", |
|
|
"mtime", |
|
|
"lang", |
|
|
"content", |
|
|
"updated_at", |
|
|
] |
|
|
total_inserted = 0 |
|
|
logger.info("Clearing DuckDB 'texts' table for full rebuild") |
|
|
with duckdb_lock: |
|
|
con.execute("DELETE FROM texts") |
|
|
with tqdm(total=len(repo_dirs), desc="Collecting per-file rows (repos)") as pbar: |
|
|
executor_cls = ( |
|
|
concurrent.futures.ProcessPoolExecutor |
|
|
if postprocess_executor_value == "process" |
|
|
else concurrent.futures.ThreadPoolExecutor |
|
|
) |
|
|
logger.info( |
|
|
f"Post-processing executor: {executor_cls.__name__} with workers={rebuild_workers_value}" |
|
|
) |
|
|
with executor_cls(max_workers=rebuild_workers_value) as executor: |
|
|
futures = [ |
|
|
executor.submit( |
|
|
collect_md_rows_for_repo_dir, |
|
|
d, |
|
|
outdir, |
|
|
lang_filter_value, |
|
|
min_text_chars_value, |
|
|
run_ts, |
|
|
) |
|
|
for d in repo_dirs |
|
|
] |
|
|
for fut in concurrent.futures.as_completed(futures): |
|
|
try: |
|
|
rows = fut.result() |
|
|
except Exception as e: |
|
|
logger.error(f"Error while scanning repo dir: {e}") |
|
|
rows = [] |
|
|
if rows: |
|
|
df_chunk = pd.DataFrame(rows, columns=cols) |
|
|
with duckdb_lock: |
|
|
con.register("df_txt_chunk", df_chunk) |
|
|
con.execute("INSERT INTO texts SELECT * FROM df_txt_chunk") |
|
|
con.unregister("df_txt_chunk") |
|
|
total_inserted += len(rows) |
|
|
pbar.set_postfix_str(f"rows={total_inserted}") |
|
|
pbar.update(1) |
|
|
|
|
|
|
|
|
texts_parquet_path = ( |
|
|
Path(texts_parquet_value) |
|
|
if texts_parquet_value |
|
|
else (outdir / "cleaned_texts_on_metadata_only.parquet") |
|
|
) |
|
|
try: |
|
|
texts_parquet_path.parent.mkdir(parents=True, exist_ok=True) |
|
|
logger.info( |
|
|
f"Exporting DuckDB 'texts' table to Parquet at '{texts_parquet_path}' ..." |
|
|
) |
|
|
|
|
|
out_path_sql = str(texts_parquet_path).replace("'", "''") |
|
|
con.execute(f"COPY (SELECT * FROM texts) TO '{out_path_sql}' (FORMAT PARQUET)") |
|
|
logger.info( |
|
|
f"Wrote per-file dataset to {texts_parquet_path} (rows={total_inserted})" |
|
|
) |
|
|
except Exception as e: |
|
|
logger.error(f"Failed to export texts to Parquet: {e}") |
|
|
|
|
|
|
|
|
try: |
|
|
con.close() |
|
|
except Exception: |
|
|
pass |
|
|
|
|
|
logger.info("Done. Check output directory and md-failed.txt") |
|
|
|
|
|
|
|
|
class TqdmLoggingHandler(logging.Handler): |
|
|
def emit(self, record): |
|
|
try: |
|
|
msg = self.format(record) |
|
|
tqdm.write(msg) |
|
|
except Exception: |
|
|
|
|
|
sys.stderr.write(str(record.getMessage()) + "\n") |
|
|
|
|
|
|
|
|
logger = logging.getLogger("scrape_gh_docs") |
|
|
|
|
|
|
|
|
def setup_logging(quiet: bool = False): |
|
|
logger.setLevel(logging.DEBUG) |
|
|
logger.propagate = False |
|
|
|
|
|
logger.handlers.clear() |
|
|
handler = TqdmLoggingHandler() |
|
|
handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s")) |
|
|
handler.setLevel(logging.WARNING if quiet else logging.INFO) |
|
|
logger.addHandler(handler) |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|