Extract awesome repos to parquet
data_collection_utils/awesome_final_repos.py (changed)

@@ -7,14 +7,14 @@ Build a list of "final" GitHub repositories starting from the Awesome list-of-li
 (case-insensitive). We recursively traverse linked Awesome lists to collect end repositories.

 We fetch README markdown via the GitHub API (contents) instead of parsing HTML. From the README
-markdown we
-
+markdown we parse bullet entries like:
+    * [Fuse](https://github.com/owner/repo) - Mobile development tools.
+and extract: name (link text), canonical GitHub link, and description (text after " - ").
+If there is no " - ", we capture the entire line (with the markdown link removed) as description.
+We exclude any repos whose name contains "awesome" and recursively traverse those as Awesome lists.

-Output:
-
-
-This file mirrors the output expected by data_collection_utils/parse_gh_docs_config.yaml
-as the input for scrape_gh_docs.py, and writes to data_collection_utils/github_links.txt.
+Output: a Parquet file with columns: name, link, description
+    awesome-repos.parquet

 Usage:
     python3 data_collection_utils/awesome_final_repos.py \
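To make the docstring's extraction rules concrete, here is a small illustration (not part of the diff) of how the example bullet above is intended to map onto the three output columns; the actual parsing is implemented in `_extract_entries_from_markdown_lines` further down.

```python
# Illustration only: the intended name/link/description breakdown for the
# docstring's example bullet. The real parsing lives in
# _extract_entries_from_markdown_lines below.
bullet = "* [Fuse](https://github.com/owner/repo) - Mobile development tools."

expected_row = {
    "name": "Fuse",                                # the markdown link text
    "link": "https://github.com/owner/repo",       # canonical GitHub repo URL
    "description": "Mobile development tools.",    # text after " - "
}

# Without a " - " separator, the whole line minus the bullet marker and the
# [name](url) segment would become the description instead.
```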
@@ -37,6 +37,7 @@ import sys
 import yaml
 from dotenv import load_dotenv
 from github_api_utils import fetch_repo_readme_markdown
+import pandas as pd

 load_dotenv()

@@ -133,13 +134,61 @@ def extract_github_links_from_markdown(md: str) -> List[str]:
     return sorted(urls)


-
+def _extract_entries_from_markdown_lines(md: str, current_owner: str, current_repo: str) -> List[Dict[str, str]]:
+    """
+    Extract entries of the form: bullet + [name](url) optionally followed by " - description".
+    If there's no " - ", use the entire line as description but remove the [name](url) part.
+    Returns a list of dicts with keys: name, url, description (raw url before canonicalization).
+    """
+    entries: List[Dict[str, str]] = []
+    lines = md.splitlines()
+    # Regex to find markdown links in a line
+    link_re = re.compile(r"\[([^\]]+)\]\((https?://[^)\s]+)\)")
+    bullet_re = re.compile(r"^\s*[-*+]\s+")
+    for line in lines:
+        # pick the first link on the line that canonicalizes to a GitHub repo
+        chosen = None
+        for m in link_re.finditer(line):
+            url_candidate = m.group(2).strip()
+            cu = canonical_repo_url(url_candidate)
+            if cu is None:
+                continue
+            o, r = parse_owner_repo(cu)
+            # Skip self-referential links (including anchors to the current README/file)
+            if o == current_owner and r == current_repo:
+                continue
+            chosen = m
+            break
+        if not chosen:
+            continue
+        name = chosen.group(1).strip()
+        url = chosen.group(2).strip()
+        # Determine description
+        right = line[chosen.end() :]
+        desc: str
+        # If right side starts with optional spaces, then dash + space, we take after it
+        m_dash = re.match(r"^\s*-\s+(.*)$", right)
+        if m_dash is not None:
+            desc = m_dash.group(1).strip()
+        else:
+            # Use entire line without the bullet marker and without the [name](url) segment
+            without_link = (line[: chosen.start()] + line[chosen.end() :]).strip()
+            # Strip a leading bullet marker if present
+            if bullet_re.match(without_link):
+                # remove the first bullet marker occurrence
+                without_link = bullet_re.sub("", without_link, count=1).strip()
+            desc = without_link
+        entries.append({"name": name, "url": url, "description": desc})
+    return entries
+
+
+async def crawl_awesome_final_entries(
     session: aiohttp.ClientSession,
     cache: Dict[str, Any],
     cache_file: Path,
     root_repo_url: str,
     max_depth: int,
-) -> List[str]:
+) -> List[Dict[str, str]]:
     root_cu = canonical_repo_url(root_repo_url)
     assert root_cu is not None, f"Not a canonical GitHub repo URL: {root_repo_url}"
     root_owner, root_repo = parse_owner_repo(root_cu)
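As a quick, self-contained sanity check of those parsing rules, the sketch below exercises the same `link_re`/`bullet_re` patterns on two sample lines. `canonical_repo_url` and `parse_owner_repo` are not part of this diff, so a simplified stand-in (`fake_canonical_repo_url`) is used here and the self-reference check is omitted; treat it as an approximation, not the module's behavior.

```python
# Approximate, standalone re-run of the parsing rules from
# _extract_entries_from_markdown_lines. fake_canonical_repo_url is a
# hypothetical stand-in for the module's canonical_repo_url helper.
import re
from typing import Optional

link_re = re.compile(r"\[([^\]]+)\]\((https?://[^)\s]+)\)")
bullet_re = re.compile(r"^\s*[-*+]\s+")


def fake_canonical_repo_url(url: str) -> Optional[str]:
    # Accept only bare https://github.com/<owner>/<repo> URLs.
    m = re.match(r"^https?://github\.com/([^/\s]+)/([^/\s#?]+)/?$", url)
    return f"https://github.com/{m.group(1)}/{m.group(2)}" if m else None


samples = [
    "* [Fuse](https://github.com/owner/repo) - Mobile development tools.",
    "- [Other](https://github.com/owner/other) with no dash separator",
]
for line in samples:
    chosen = next(
        (m for m in link_re.finditer(line) if fake_canonical_repo_url(m.group(2))),
        None,
    )
    if chosen is None:
        continue
    name, url = chosen.group(1).strip(), chosen.group(2).strip()
    m_dash = re.match(r"^\s*-\s+(.*)$", line[chosen.end():])
    if m_dash is not None:
        desc = m_dash.group(1).strip()
    else:
        without_link = (line[:chosen.start()] + line[chosen.end():]).strip()
        desc = bullet_re.sub("", without_link, count=1).strip()
    print((name, url, desc))
# ('Fuse', 'https://github.com/owner/repo', 'Mobile development tools.')
# ('Other', 'https://github.com/owner/other', 'with no dash separator')
```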
@@ -150,7 +199,8 @@ async def crawl_awesome_final_repos(
     visited_awesome.add(root_cu)
     queue.append((root_owner, root_repo, 0))

-
+    # map canonical link -> {name, link, description}
+    results: Dict[str, Dict[str, str]] = {}

     while queue:
         owner, repo, depth = queue.popleft()
@@ -159,13 +209,13 @@ async def crawl_awesome_final_repos(
         if md is None:
             print(f" README not found for {owner}/{repo}", file=sys.stderr)
             continue
-
-        for
-        cu = canonical_repo_url(
+        entries = _extract_entries_from_markdown_lines(md, owner, repo)
+        for e in entries:
+            cu = canonical_repo_url(e["url"])
             if cu is None:
                 continue
             o, r = parse_owner_repo(cu)
-            # Skip self-references
+            # Skip self-references
             if o == owner and r == repo:
                 continue
             if is_awesome_repo_name(r):
@@ -173,9 +223,14 @@ async def crawl_awesome_final_repos(
                 visited_awesome.add(cu)
                 queue.append((o, r, depth + 1))
             else:
-
+                if cu not in results:
+                    results[cu] = {"name": e["name"], "link": cu, "description": e["description"]}
+                else:
+                    # Prefer keeping the first occurrence; if existing description is empty and new is not, update
+                    if not results[cu]["description"] and e["description"]:
+                        results[cu]["description"] = e["description"]

-    return
+    return list(results.values())


 def main() -> None:
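In isolation, the de-duplication added in this hunk keeps the first entry seen for a canonical link and only overwrites an empty description; a toy restatement (canonicalization skipped):

```python
# Toy restatement of the de-duplication above: first occurrence wins,
# unless its description was empty and a later one is non-empty.
results = {}
entries = [
    {"name": "Repo", "url": "https://github.com/owner/repo", "description": ""},
    {"name": "Repo", "url": "https://github.com/owner/repo", "description": "A tool."},
]
for e in entries:
    cu = e["url"]  # assume already canonical for this toy example
    if cu not in results:
        results[cu] = {"name": e["name"], "link": cu, "description": e["description"]}
    elif not results[cu]["description"] and e["description"]:
        results[cu]["description"] = e["description"]

print(list(results.values()))
# [{'name': 'Repo', 'link': 'https://github.com/owner/repo', 'description': 'A tool.'}]
```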
@@ -209,7 +264,7 @@ def main() -> None:
     ap.add_argument(
         "--output-dir",
         default=cfg.get("output_dir", "."),
-        help="Output directory for
+        help="Output directory for awesome-repos.parquet",
     )
     ap.add_argument(
         "--cache-dir",
@@ -229,13 +284,15 @@ def main() -> None:
     # Run async crawl
     async def run():
         async with aiohttp.ClientSession() as session:
-
+            rows = await crawl_awesome_final_entries(
                 session, cache, cache_file, args.root, args.depth
             )
-
-
-
-
+            out_parquet = output_dir / "awesome-repos.parquet"
+            output_dir.mkdir(parents=True, exist_ok=True)
+            df = pd.DataFrame(rows, columns=["name", "link", "description"])
+            df.to_parquet(out_parquet, index=False)
+            print(f"Collected {len(rows)} final repositories with descriptions")
+            print(f"Wrote to {out_parquet}")

     asyncio.run(run())

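Once the crawl has run, the new Parquet output can be inspected with pandas; note that `DataFrame.to_parquet` / `read_parquet` require a Parquet engine such as pyarrow or fastparquet to be installed. A minimal sketch, assuming the file landed in the current directory (the default `output_dir`):

```python
# Minimal check of the new output file. Assumes the default output directory
# and that pyarrow or fastparquet is installed as the Parquet engine.
import pandas as pd

df = pd.read_parquet("awesome-repos.parquet")
print(df.columns.tolist())          # ['name', 'link', 'description']
print(f"{len(df)} repositories")
print(df.head())
```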