#!/usr/bin/env python3
"""
GitHub API utilities for scraping and metadata collection.
Separated from scrape_gh_docs.py to keep the main script slimmer.
"""
from __future__ import annotations
import os
import time
import logging
import threading
from urllib.parse import quote_plus
from typing import Optional, Dict, Any, List
import json
import requests
import aiohttp
GITHUB_API = "https://api.github.com"
# Use the same logger name as the main script so logs route through its handler
logger = logging.getLogger("scrape_gh_docs")
_thread_local = threading.local()
def github_headers() -> Dict[str, str]:
token = os.getenv("GITHUB_TOKEN")
    assert token is not None, "Set GITHUB_TOKEN to run this script!"
h = {"Accept": "application/vnd.github.v3+json", "User-Agent": "docs-scraper/1.0"}
if token:
h["Authorization"] = f"token {token}"
return h
def get_session() -> requests.Session:
sess = getattr(_thread_local, "session", None)
if sess is None:
sess = requests.Session()
_thread_local.session = sess
return sess
def request_json(
url: str, params: Optional[dict] = None, accept_status=(200,), max_retries: int = 3
):
for attempt in range(max_retries):
resp = get_session().get(
url, headers=github_headers(), params=params, timeout=30
)
if resp.status_code in accept_status:
# Some endpoints return empty responses on success (e.g. 204). Handle json errors defensively.
try:
return resp.json()
except Exception:
return None
        if resp.status_code in (403, 429):
            # rate limited or blocked - honor Retry-After / X-RateLimit-Reset and sleep
reset = resp.headers.get("X-RateLimit-Reset")
ra = resp.headers.get("Retry-After")
if ra:
wait = int(ra)
elif reset:
wait = max(5, int(reset) - int(time.time()))
else:
wait = 30
logger.warning(
f"{resp.status_code} from {url}. Sleeping {wait}s (attempt {attempt + 1}/{max_retries})"
)
time.sleep(wait)
continue
if 500 <= resp.status_code < 600:
backoff = (attempt + 1) * 5
logger.warning(f"{resp.status_code} from {url}. Backing off {backoff}s")
time.sleep(backoff)
continue
logger.error(f"Request to {url} returned {resp.status_code}: {resp.text}")
return None
logger.error(f"Exhausted retries for {url}")
return None
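# Illustrative usage sketch (not called by the scraper; "some-owner"/"some-repo"
# are placeholders and GITHUB_TOKEN must be set). Including 404 in accept_status
# makes request_json() return GitHub's error JSON for a missing resource instead
# of logging an error and returning None, so callers can tell "not found" apart
# from a transient failure:
#
#     data = request_json(
#         f"{GITHUB_API}/repos/some-owner/some-repo/contents/docs",
#         accept_status=(200, 404),
#     )
#     if isinstance(data, dict) and data.get("message") == "Not Found":
#         ...  # the path does not exist on the default branch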
def find_readme_file(
owner: str, repo: str, ref: Optional[str] = None
) -> Optional[Dict[str, str]]:
"""
Find a README file located at the repository root using the Contents API.
Returns a dict with keys: 'name', 'path', 'download_url' if found, else None.
    README detection is case-insensitive and prioritizes extensions: .md < .rst < .org < others,
    with shorter names breaking ties.
"""
    # List root contents; passing an empty path lists the repository root
root_list = get_contents(owner, repo, "", ref=ref)
if not isinstance(root_list, list):
return None
candidates: List[tuple[tuple[int, int], Dict[str, Any]]] = []
prio_map = {".md": 0, ".rst": 1, ".org": 2}
for it in root_list:
try:
if it.get("type") != "file":
continue
name = it.get("name") or ""
if not name:
continue
if not name.lower().startswith("readme"):
continue
ext = ""
if "." in name:
ext = name[name.rfind(".") :].lower()
prio = (prio_map.get(ext, 3), len(name))
candidates.append((prio, it))
except Exception:
continue
if not candidates:
return None
candidates.sort(key=lambda x: x[0])
best = candidates[0][1]
# The contents API entry includes 'name', 'path', 'download_url'
name = best.get("name")
path = best.get("path")
download_url = best.get("download_url")
if (
isinstance(name, str)
and isinstance(path, str)
and isinstance(download_url, str)
):
return {"name": name, "path": path, "download_url": download_url}
return None
def download_readme_to(
owner: str, repo: str, dest_root, ref: Optional[str] = None
) -> Optional[Dict[str, str]]:
"""
    Locate the README at the repo root and download it into dest_root/<readme name>.
Returns the README info dict if downloaded, else None.
"""
info = find_readme_file(owner, repo, ref=ref)
if not info:
return None
dest_root.mkdir(parents=True, exist_ok=True)
dest = dest_root / info["name"]
try:
download_file(info["download_url"], dest)
return info
except Exception:
logger.warning(f"Failed to download README for {owner}/{repo}: {info['name']}")
return None
def download_file(url: str, dest_path):
dest_path.parent.mkdir(parents=True, exist_ok=True)
with get_session().get(url, headers=github_headers(), stream=True, timeout=60) as r:
r.raise_for_status()
with open(dest_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
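# Illustrative usage sketch (owner, repo and the destination directory are
# placeholders; requires GITHUB_TOKEN):
#
#     from pathlib import Path
#
#     info = download_readme_to("owner", "repo", Path("scraped/owner__repo"))
#     if info:
#         logger.info(f"Saved {info['name']} from {info['download_url']}")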
# === High-level GitHub API helpers ===
def get_repo_info(owner: str, repo: str) -> Optional[Dict[str, Any]]:
url = f"{GITHUB_API}/repos/{owner}/{repo}"
return request_json(url)
def get_default_branch(
owner: str, repo: str, repo_json: Optional[Dict[str, Any]] = None
) -> Optional[str]:
if repo_json and "default_branch" in repo_json:
return repo_json["default_branch"]
info = get_repo_info(owner, repo)
if not info:
return None
return info.get("default_branch")
def get_latest_commit_date(
owner: str,
repo: str,
ref: Optional[str],
repo_json: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""
    Return the ISO 8601 date string of the latest commit on the given ref (branch or SHA).
    Falls back to the repository's pushed_at if the commits endpoint returns nothing.
"""
branch = ref or (repo_json.get("default_branch") if repo_json else None) or "main"
commits = request_json(
f"{GITHUB_API}/repos/{owner}/{repo}/commits",
params={"sha": branch, "per_page": 1},
accept_status=(200,),
)
if isinstance(commits, list) and commits:
try:
return commits[0]["commit"]["author"]["date"]
except Exception:
pass
if repo_json is None:
repo_json = get_repo_info(owner, repo) or {}
return repo_json.get("pushed_at")
def get_contents(owner: str, repo: str, path: str, ref: Optional[str] = None):
url = f"{GITHUB_API}/repos/{owner}/{repo}/contents/{quote_plus(path)}"
params = {"ref": ref} if ref else None
return request_json(url, params=params, accept_status=(200, 404))
def get_owner_type(owner: str) -> Optional[str]:
info = request_json(f"{GITHUB_API}/users/{owner}", accept_status=(200, 404))
if not info:
return None
return info.get("type")
def get_org_repos(owner: str, per_page: int = 100) -> List[Dict[str, Any]]:
owner_type = get_owner_type(owner)
base = "orgs" if owner_type == "Organization" else "users"
repos: List[Dict[str, Any]] = []
page = 1
while True:
url = f"{GITHUB_API}/{base}/{owner}/repos"
params = {"per_page": per_page, "page": page}
data = request_json(url, params=params)
if not data:
if page == 1 and base == "orgs":
base = "users"
continue
break
repos.extend(data)
if len(data) < per_page:
break
page += 1
return repos
def search_repos(query: str, per_page: int = 5) -> List[Dict[str, Any]]:
url = f"{GITHUB_API}/search/repositories"
params = {"q": query, "per_page": per_page}
res = request_json(url, params=params, accept_status=(200,))
if not res:
return []
return res.get("items", [])
def get_repo_tree_paths(owner: str, repo: str, ref: Optional[str]) -> List[str]:
ref = ref or "main"
url = f"{GITHUB_API}/repos/{owner}/{repo}/git/trees/{quote_plus(ref)}"
params = {"recursive": 1}
data = request_json(url, params=params, accept_status=(200,))
if not data or "tree" not in data:
return []
paths: List[str] = []
for entry in data["tree"]:
if entry.get("type") == "blob" and "path" in entry:
paths.append(entry["path"])
return paths
def get_repo_tree_md_paths(owner: str, repo: str, ref: Optional[str]) -> List[str]:
"""
Return only Markdown file paths from the repository tree on the given ref
using the Git Trees API (recursive=1).
This is a convenience wrapper over get_repo_tree_paths() that filters to
.md files, case-insensitive.
"""
all_paths = get_repo_tree_paths(owner, repo, ref)
return [p for p in all_paths if p.lower().endswith(".md")]
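# Illustrative usage sketch (owner/repo are placeholders; requires GITHUB_TOKEN):
#
#     md_paths = get_repo_tree_md_paths("owner", "repo", ref=None)
#     doc_pages = [p for p in md_paths if p.startswith("docs/")]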
async def fetch_repo_readme_markdown(
session: aiohttp.ClientSession, owner: str, repo: str
) -> Optional[str]:
"""
Fetch README markdown using the contents API, trying README.md and readme.md.
Returns the markdown text or None if not found.
"""
headers = github_headers()
for name in ("README.md", "readme.md"):
url = f"{GITHUB_API}/repos/{owner}/{repo}/contents/{name}"
try:
async with session.get(url, headers=headers) as resp:
if resp.status == 200:
data = await resp.json()
if isinstance(data, dict) and "download_url" in data:
download_url = data["download_url"]
async with session.get(download_url, headers=headers) as d:
if d.status == 200:
return await d.text()
except Exception:
continue
# Fallback: list the repo tree at depth=0 (root only) and select any file starting with README*
try:
# Get default branch to address the tree head
repo_url = f"{GITHUB_API}/repos/{owner}/{repo}"
default_branch = "main"
async with session.get(repo_url, headers=headers) as rinfo:
if rinfo.status == 200:
info = await rinfo.json()
if isinstance(info, dict) and info.get("default_branch"):
default_branch = info["default_branch"]
# Depth=0 tree (root only). Omitting recursive parameter implies non-recursive.
tree_url = (
f"{GITHUB_API}/repos/{owner}/{repo}/git/trees/{quote_plus(default_branch)}"
)
async with session.get(tree_url, headers=headers) as rtree:
if rtree.status != 200:
return None
tree = await rtree.json()
if not isinstance(tree, dict) or "tree" not in tree:
return None
entries = tree["tree"]
# Find candidates that start with README (case-insensitive) and are files (blobs)
candidates = []
for e in entries:
if e.get("type") != "blob":
continue
path = e.get("path")
if not path:
continue
name_lower = path.lower()
if name_lower.startswith("readme"):
# Priority: .md < .rst < .org < others; shorter names first
prio_map = {".md": 0, ".rst": 1, ".org": 2}
ext = ""
if "." in path:
ext = path[path.rfind(".") :].lower()
prio = (prio_map.get(ext, 3), len(path))
candidates.append((prio, path))
if not candidates:
return None
candidates.sort()
chosen_path = candidates[0][1]
# Fetch the chosen README variant via contents API to get a direct download URL
contents_url = (
f"{GITHUB_API}/repos/{owner}/{repo}/contents/{quote_plus(chosen_path)}"
)
async with session.get(contents_url, headers=headers) as rc:
if rc.status != 200:
return None
cdata = await rc.json()
if isinstance(cdata, dict) and "download_url" in cdata:
download_url = cdata["download_url"]
async with session.get(download_url, headers=headers) as rd:
if rd.status == 200:
return await rd.text()
except Exception:
return None
return None
async def fetch_repo_description(
session: aiohttp.ClientSession, owner: str, repo: str
) -> Optional[str]:
url = f"https://api.github.com/repos/{owner}/{repo}"
try:
async with session.get(url, headers=github_headers()) as resp:
if resp.status == 200:
data = await resp.json()
if isinstance(data, dict) and "description" in data:
desc = data["description"]
if isinstance(desc, str):
return desc
except Exception:
return None
return None
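# Illustrative usage sketch for the async helpers (owner/repo are placeholders;
# requires GITHUB_TOKEN and is driven via asyncio.run):
#
#     import asyncio
#
#     async def _demo() -> None:
#         async with aiohttp.ClientSession() as session:
#             readme = await fetch_repo_readme_markdown(session, "owner", "repo")
#             desc = await fetch_repo_description(session, "owner", "repo")
#             print(desc, (readme or "")[:80])
#
#     asyncio.run(_demo())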
# === GitHub GraphQL helpers ===
GITHUB_GRAPHQL = "https://api.github.com/graphql"
def _graphql_headers() -> Dict[str, str]:
token = os.getenv("GITHUB_TOKEN")
h = {"Content-Type": "application/json", "User-Agent": "docs-scraper/1.0"}
if token:
# GitHub GraphQL expects a bearer token
h["Authorization"] = f"bearer {token}"
return h
def fetch_repos_metadata_graphql(
pairs: List[tuple[str, str]],
topics_limit: int = 20,
) -> Dict[str, Dict[str, Any]]:
"""
Batch-fetch repository metadata via GitHub GraphQL for a list of (owner, repo) pairs.
Returns a mapping from "owner/repo" -> {
"name": str | None,
"description": str | None,
"stars": int | None,
"default_branch": str | None,
"last_commit_date": str | None,
"language": str | None,
"topics": List[str],
"is_fork": bool | None,
"parent_url": str | None,
}
Notes:
- last_commit_date is taken from the latest commit on the default branch when available;
falls back to pushedAt otherwise.
- Callers should keep batch sizes modest (e.g., <= 30) to stay under GraphQL cost limits.
"""
if not pairs:
return {}
# Build a single query with aliased repository fields and use variables for owner/name
# Example:
# query($owner0: String!, $name0: String!, ...) {
# repo_0: repository(owner: $owner0, name: $name0) { ...fields }
# }
    variables: Dict[str, Any] = {}
fields: List[str] = []
for i, (owner, name) in enumerate(pairs):
vars[f"owner{i}"] = owner
vars[f"name{i}"] = name
alias = f"repo_{i}"
header = f"{alias}: repository(owner: $owner{i}, name: $name{i}) "
topics_fragment = (
" repositoryTopics(first: "
+ str(topics_limit)
+ ") { nodes { topic { name } } }\n"
if topics_limit and topics_limit > 0
else ""
)
body = (
"{\n"
" name\n"
" description\n"
" stargazerCount\n"
" pushedAt\n"
" isFork\n"
" parent { url nameWithOwner }\n"
" primaryLanguage { name }\n" + topics_fragment + " defaultBranchRef {\n"
" name\n"
" target {\n"
" ... on Commit {\n"
" history(first: 1) {\n"
" nodes {\n"
" committedDate\n"
" }\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n"
)
fields.append(header + body)
# Assemble query
var_decls = " ".join(
[f"$owner{i}: String!, $name{i}: String!" for i in range(len(pairs))]
)
query = "query(" + var_decls + ") {\n" + "\n".join(fields) + "\n}"
payload = {"query": query, "variables": vars}
max_retries = 3
for attempt in range(max_retries):
resp = get_session().post(
GITHUB_GRAPHQL,
headers=_graphql_headers(),
data=json.dumps(payload),
timeout=60,
)
if resp.status_code == 200:
try:
data = resp.json()
except Exception:
logger.error("Failed to decode GraphQL JSON response")
return {}
if "errors" in data and data["errors"]:
# Log the first error for context but still try to parse any partial data
logger.warning(f"GraphQL response has errors: {data['errors'][0]}")
repos = data.get("data", {}) or {}
break
        if resp.status_code in (403, 429):
reset = resp.headers.get("X-RateLimit-Reset")
ra = resp.headers.get("Retry-After")
if ra:
wait = int(ra)
elif reset:
wait = max(5, int(reset) - int(time.time()))
else:
wait = 30
logger.warning(
f"GraphQL {resp.status_code}. Sleeping {wait}s (attempt {attempt + 1}/{max_retries})"
)
time.sleep(wait)
continue
if 500 <= resp.status_code < 600:
backoff = (attempt + 1) * 5
logger.warning(f"GraphQL {resp.status_code}. Backing off {backoff}s")
time.sleep(backoff)
continue
logger.error(f"GraphQL error {resp.status_code}: {resp.text}")
return {}
else:
logger.error("Exhausted retries for GraphQL request")
return {}
out: Dict[str, Dict[str, Any]] = {}
for i, (owner, name) in enumerate(pairs):
alias = f"repo_{i}"
r = repos.get(alias)
key = f"{owner}/{name}"
if not isinstance(r, dict):
out[key] = {
"name": None,
"description": None,
"stars": None,
"default_branch": None,
"last_commit_date": None,
"language": None,
"topics": [],
"is_fork": None,
"parent_url": None,
}
continue
repo_name = r.get("name")
desc = r.get("description")
stars = r.get("stargazerCount")
pushed_at = r.get("pushedAt")
is_fork = r.get("isFork")
parent = r.get("parent") or {}
parent_url = parent.get("url") if isinstance(parent, dict) else None
lang_obj = r.get("primaryLanguage") or {}
language = lang_obj.get("name") if isinstance(lang_obj, dict) else None
topics_list: List[str] = []
rt = r.get("repositoryTopics") or {}
nodes = rt.get("nodes") if isinstance(rt, dict) else None
if isinstance(nodes, list):
for node in nodes:
t = None
if isinstance(node, dict):
topic = node.get("topic")
if isinstance(topic, dict):
t = topic.get("name")
if isinstance(t, str):
topics_list.append(t)
dbr = r.get("defaultBranchRef") or {}
default_branch = dbr.get("name") if isinstance(dbr, dict) else None
last_commit_date = pushed_at
if isinstance(dbr, dict):
tgt = dbr.get("target") or {}
hist = tgt.get("history") or {}
nodes = hist.get("nodes") or []
if isinstance(nodes, list) and nodes:
lcd = nodes[0].get("committedDate")
if isinstance(lcd, str):
last_commit_date = lcd
out[key] = {
"name": repo_name if isinstance(repo_name, str) else None,
"description": desc if isinstance(desc, str) else None,
"stars": stars,
"default_branch": default_branch,
"last_commit_date": last_commit_date,
"language": language if isinstance(language, str) else None,
"topics": topics_list,
"is_fork": is_fork if isinstance(is_fork, bool) else None,
"parent_url": parent_url if isinstance(parent_url, str) else None,
}
return out
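# Illustrative usage sketch (repository names are placeholders; requires
# GITHUB_TOKEN). Batches are capped at 30 pairs per request, per the note in
# the docstring above:
#
#     pairs = [("owner1", "repo1"), ("owner2", "repo2"), ("owner3", "repo3")]
#     metadata: Dict[str, Dict[str, Any]] = {}
#     for start in range(0, len(pairs), 30):
#         metadata.update(fetch_repos_metadata_graphql(pairs[start : start + 30]))
#     stars = metadata.get("owner1/repo1", {}).get("stars")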