# gooddocs-v0/data_collection_utils/top_1000_repos.py
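#
# Scrapes https://top1000repos.com/ with Playwright, normalizes the GitHub repository
# links, optionally resolves forks to their parent repos via the GitHub GraphQL API,
# and writes the enriched metadata to a parquet file plus a plain-text link list.
# A GitHub token is expected in .env (loaded here via python-dotenv; the token variable
# itself is presumably consumed by the helpers in github_api_utils).
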
from pathlib import Path
from playwright.sync_api import sync_playwright
from urllib.parse import urlparse
from typing import List, Optional, Dict, Any
import yaml
import pandas as pd
from github_api_utils import fetch_repos_metadata_graphql
from dotenv import load_dotenv

URL = "https://top1000repos.com/"
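

# canonical_repo_url keeps only links of the canonical form
# https://github.com/<owner>/<repo>. Illustrative examples:
#   "https://github.com/psf/requests/issues/123" -> "https://github.com/psf/requests"
#   "https://github.com/topics/python"           -> None (blocked non-repo path)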
def canonical_repo_url(url: str) -> Optional[str]:
    p = urlparse(url)
    if p.scheme != "https" or p.netloc != "github.com":
        return None
    parts = [part for part in p.path.split("/") if part]
    if len(parts) < 2:
        return None
    owner, repo = parts[0], parts[1]
    # Exclude non-repo top-level paths like topics, orgs, marketplace, etc.
    blocked_owners = {
        "topics",
        "collections",
        "orgs",
        "marketplace",
        "features",
        "pricing",
        "about",
        "site",
        "blog",
        "events",
        "apps",
        "sponsors",
        "login",
        "join",
        "explore",
        "contact",
        "settings",
        "search",
        "codespaces",
    }
    if owner in blocked_owners:
        return None
    return f"https://github.com/{owner}/{repo}"


def normalize_github_repo_links(links: List[str]) -> List[str]:
    repos = set()
    for u in links:
        cu = canonical_repo_url(u)
        if cu is not None:
            repos.add(cu)
    return sorted(repos)
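
# Example (illustrative): normalize_github_repo_links(
#     ["https://github.com/psf/requests", "https://github.com/psf/requests/pulls"]
# ) collapses both entries to ["https://github.com/psf/requests"].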


# removed: resolve_to_original_repo (replaced by GraphQL-based mapping)
def parse_owner_repo(url: str) -> Optional[tuple[str, str]]:
    p = urlparse(url)
    parts = [part for part in p.path.split("/") if part]
    if len(parts) < 2:
        return None
    return parts[0], parts[1]
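

# Illustrative usage (hypothetical owner name): if someuser/linux is reported by the
# GraphQL metadata as a fork of torvalds/linux, then
# map_to_original_repos_graphql(["https://github.com/someuser/linux"]) is expected to
# return ["https://github.com/torvalds/linux"].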
def map_to_original_repos_graphql(
    urls: List[str], *, batch_size: int = 30, topics_limit: int = 0
) -> List[str]:
    """Resolve forks to their parent repos using batched GraphQL metadata requests.

    - For each input repo URL, if it's a fork and parent_url is available, map to parent.
    - Returns the sorted unique list of canonical GitHub URLs.
    - Queries are sent in chunks of ``batch_size`` to avoid oversized GraphQL payloads.
    """
    pairs: List[tuple[str, str]] = []
    for u in urls:
        pr = parse_owner_repo(u)
        if pr is None:
            continue
        pairs.append(pr)
    # Batch query to avoid 502s on oversized GraphQL requests
    meta: Dict[str, Dict[str, Any]] = {}
    for i in range(0, len(pairs), batch_size):
        chunk = pairs[i : i + batch_size]
        mm = fetch_repos_metadata_graphql(chunk, topics_limit=topics_limit)
        if mm:
            meta.update(mm)
    out: set[str] = set()
    for owner, repo in pairs:
        key = f"{owner}/{repo}"
        m = meta.get(key) or {}
        parent_url = m.get("parent_url")
        if m.get("is_fork") and isinstance(parent_url, str):
            cu = canonical_repo_url(parent_url)
            if cu is not None:
                out.add(cu)
                continue
        out.add(f"https://github.com/{owner}/{repo}")
    return sorted(out)
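
# fetch_repos_metadata_graphql (from github_api_utils) is assumed to return a dict keyed
# by "owner/repo"; the fields consumed here and in main() are "is_fork", "parent_url",
# "name", "description", "stars", "language", and "topics" (a list). Repos it could not
# resolve simply have no entry, so callers fall back to the raw owner/repo pair.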


def main() -> None:
    # Load token from .env for GraphQL
    load_dotenv()
    print("[top_1000_repos] Starting script execution. This may take a minute or so...")

    # Load YAML config next to this script if present
    cfg: Dict[str, Any] = {}
    cfg_path = Path(__file__).with_name("top_1000_repos_config.yaml")
    if cfg_path.exists():
        cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}

    def _resolve_cfg_path(val: Optional[str]) -> Optional[Path]:
        if val is None:
            return None
        p = Path(val)
        if not p.is_absolute():
            p = (cfg_path.parent / p).resolve()
        return p

    project_root = Path(__file__).resolve().parents[1]
    out_html = _resolve_cfg_path(cfg.get("out_html")) or Path(__file__).with_name(
        "Top 1000 GitHub repositories, updated daily, all on one page..html"
    )
    out_links = _resolve_cfg_path(cfg.get("out_links")) or (
        project_root / "github_links.txt"
    )
    out_parquet = _resolve_cfg_path(cfg.get("out_parquet")) or Path(__file__).with_name(
        "top-1000-repos.parquet"
    )
    headless = bool(cfg.get("headless", True))
    # Scrolling config
    scroll_max_iters = int(cfg.get("scroll_max_iters", 200))
    scroll_pause_ms = int(cfg.get("scroll_pause_ms", 300))
    stable_threshold = int(cfg.get("stable_threshold", 10))
    min_anchors = int(cfg.get("min_anchors", 1500))
    # GraphQL config
    graphql_batch_size = int(cfg.get("graphql_batch_size", 30))
    topics_limit = int(cfg.get("topics_limit", 20))
    fork_resolution = bool(cfg.get("fork_resolution", True))
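
    # Illustrative top_1000_repos_config.yaml (all keys optional; example values shown,
    # defaults are the ones used above; relative paths resolve against the script's folder):
    #   out_html: rendered_top1000.html
    #   out_links: ../github_links.txt
    #   out_parquet: top-1000-repos.parquet
    #   headless: true
    #   scroll_max_iters: 200
    #   scroll_pause_ms: 300
    #   stable_threshold: 10
    #   min_anchors: 1500
    #   graphql_batch_size: 30
    #   topics_limit: 20
    #   fork_resolution: true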
print("[top_1000_repos] Launching Playwright browser...")
with sync_playwright() as p:
browser = p.chromium.launch(headless=headless)
context = browser.new_context()
page = context.new_page()
page.goto(URL, wait_until="networkidle")
# Wait until at least one GitHub link is present in the DOM
page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
# Auto-scroll to force lazy loading/virtualized list to render all items
def _scroll_all(
max_iters: int = scroll_max_iters, pause_ms: int = scroll_pause_ms
) -> None:
prev_count = 0
stable = 0
for _ in range(max_iters):
page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
page.wait_for_timeout(pause_ms)
count = page.eval_on_selector_all(
'a[href^="https://github.com/"]', "els => els.length"
)
if count <= prev_count:
stable += 1
else:
stable = 0
prev_count = count
# Stop after several iterations without growth or when clearly above 1000 anchors
if stable >= stable_threshold or prev_count >= min_anchors:
break
_scroll_all()
print("[top_1000_repos] Scrolling completed. Extracting links...")
# Save rendered HTML
html = page.content()
out_html.write_text(html, encoding="utf-8")
print(f"[top_1000_repos] Saved rendered HTML to {out_html}")
# Extract canonical GitHub repo URLs from the DOM after full scroll
links = page.eval_on_selector_all(
'a[href*="https://github.com/"]',
"els => Array.from(new Set(els.map(e => e.href))).sort()",
)
repo_links = normalize_github_repo_links(links)
# Optionally map any fork links to their original repositories and deduplicate
if fork_resolution:
repo_links = map_to_original_repos_graphql(
repo_links, batch_size=graphql_batch_size, topics_limit=0
)
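            # Note: topics_limit=0 here since only is_fork/parent_url matter for fork
            # resolution; full topics are fetched in the enrichment pass below.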

        # Persist github_links.txt for visibility/debug (even if not used downstream)
        print(
            f"[top_1000_repos] Writing {len(repo_links)} repository links to {out_links}"
        )
        with out_links.open("w", encoding="utf-8") as f:
            f.write("\n".join(repo_links) + "\n")

        # Enrich via GraphQL in batches
        print(
            f"[top_1000_repos] Enriching repository metadata via GraphQL (batch size {graphql_batch_size})..."
        )
        pairs: List[tuple[str, str]] = []
        for u in repo_links:
            pr = parse_owner_repo(u)
            if pr is not None:
                pairs.append(pr)
        meta_map: Dict[str, Dict[str, Any]] = {}
        batch_size = graphql_batch_size
        for i in range(0, len(pairs), batch_size):
            chunk = pairs[i : i + batch_size]
            try:
                mm = fetch_repos_metadata_graphql(chunk, topics_limit=topics_limit)
            except Exception:
                mm = {}
            if mm:
                meta_map.update(mm)
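
        # Build one row per repo: name (falls back to the repo slug), link, description,
        # stars, language, and topics; values with unexpected types become None ([] for topics).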
        rows: List[Dict[str, Any]] = []
        for owner, repo in pairs:
            m = meta_map.get(f"{owner}/{repo}") or {}
            name = m.get("name") or repo
            desc = m.get("description")
            stars = m.get("stars")
            language = m.get("language")
            topics = m.get("topics")
            rows.append(
                {
                    "name": name,
                    "link": f"https://github.com/{owner}/{repo}",
                    "description": desc if isinstance(desc, str) else None,
                    "stars": stars if isinstance(stars, int) else None,
                    "language": language if isinstance(language, str) else None,
                    "topics": topics if isinstance(topics, list) else [],
                }
            )

        df = pd.DataFrame(rows)
        df.to_parquet(out_parquet, index=False)
        print(f"Wrote HTML to {out_html}")
        print(
            f"Saved {len(df)} repos to {out_parquet} and links ({len(repo_links)}) to {out_links}"
        )
        context.close()
        browser.close()


if __name__ == "__main__":
    main()