MRiabov committed on
Commit 84edce3 · 1 Parent(s): aaf015f

gh scraping improved and split into utils.
.gitignore CHANGED
@@ -1,4 +1,176 @@
  *.secret
  output/
  md-failed.txt
- github_links.txt
+ github_links.txt
+
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ ### Python Patch ###
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+ poetry.toml
+
+ # ruff
+ .ruff_cache/
+
+ # LSP config files
+ pyrightconfig.json
data_collection_utils/awesome_final_repos.py CHANGED
@@ -29,13 +29,14 @@ from urllib.parse import urlparse
29
  import asyncio
30
  import aiohttp
31
  import json
32
- import os
33
 
34
  import argparse
35
  import re
36
  import sys
37
  import yaml
38
  from dotenv import load_dotenv
 
39
 
40
  load_dotenv()
41
 
@@ -55,51 +56,17 @@ def save_cache(cache_file: Path, cache: Dict[str, Any]) -> None:
55
  json.dump(cache, f, ensure_ascii=False, indent=2)
56
 
57
 
58
- async def fetch_readme_async(
59
- session: aiohttp.ClientSession, owner: str, repo: str, headers: Dict[str, str]
60
- ) -> Optional[str]:
61
- # Try README.md and readme.md
62
- for name in ["README.md", "readme.md"]:
63
- url = f"https://api.github.com/repos/{owner}/{repo}/contents/{name}"
64
- try:
65
- async with session.get(url, headers=headers) as resp:
66
- if resp.status == 200:
67
- data = await resp.json()
68
- if isinstance(data, dict) and "download_url" in data:
69
- download_url = data["download_url"]
70
- async with session.get(
71
- download_url, headers=headers
72
- ) as download_resp:
73
- if download_resp.status == 200:
74
- return await download_resp.text()
75
- except Exception:
76
- continue
77
- return None
78
-
79
-
80
- def get_aiohttp_headers() -> Dict[str, str]:
81
- token = os.getenv("GITHUB_TOKEN")
82
- headers = {
83
- "Accept": "application/vnd.github.v3+json",
84
- "User-Agent": "awesome-final-repos/1.0",
85
- }
86
- if token:
87
- headers["Authorization"] = f"token {token}"
88
- return headers
89
-
90
-
91
  async def fetch_readme_with_cache(
92
  session: aiohttp.ClientSession,
93
  cache: Dict[str, Any],
94
  cache_file: Path,
95
  owner: str,
96
  repo: str,
97
- headers: Dict[str, str],
98
  ) -> Optional[str]:
99
  key = f"{owner}/{repo}"
100
  if key in cache:
101
  return cache[key]
102
- md = await fetch_readme_async(session, owner, repo, headers)
103
  if md is not None:
104
  cache[key] = md
105
  save_cache(cache_file, cache)
@@ -170,7 +137,6 @@ async def crawl_awesome_final_repos(
170
  session: aiohttp.ClientSession,
171
  cache: Dict[str, Any],
172
  cache_file: Path,
173
- headers: Dict[str, str],
174
  root_repo_url: str,
175
  max_depth: int,
176
  ) -> List[str]:
@@ -189,9 +155,7 @@ async def crawl_awesome_final_repos(
189
  while queue:
190
  owner, repo, depth = queue.popleft()
191
  print(f"[depth={depth}] awesome: {owner}/{repo}")
192
- md = await fetch_readme_with_cache(
193
- session, cache, cache_file, owner, repo, headers
194
- )
195
  if md is None:
196
  print(f" README not found for {owner}/{repo}", file=sys.stderr)
197
  continue
@@ -262,14 +226,11 @@ def main() -> None:
262
  # Load cache
263
  cache = load_cache(cache_file)
264
 
265
- # Get headers
266
- headers = get_aiohttp_headers()
267
-
268
  # Run async crawl
269
  async def run():
270
  async with aiohttp.ClientSession() as session:
271
  finals = await crawl_awesome_final_repos(
272
- session, cache, cache_file, headers, args.root, args.depth
273
  )
274
  out_links = output_dir / "github_links.txt"
275
  out_links.write_text("\n".join(finals) + "\n", encoding="utf-8")
 
29
  import asyncio
30
  import aiohttp
31
  import json
32
+ import os # noqa: F401
33
 
34
  import argparse
35
  import re
36
  import sys
37
  import yaml
38
  from dotenv import load_dotenv
39
+ from github_api_utils import fetch_repo_readme_markdown
40
 
41
  load_dotenv()
42
 
 
56
  json.dump(cache, f, ensure_ascii=False, indent=2)
57
 
58
 
59
  async def fetch_readme_with_cache(
60
  session: aiohttp.ClientSession,
61
  cache: Dict[str, Any],
62
  cache_file: Path,
63
  owner: str,
64
  repo: str,
 
65
  ) -> Optional[str]:
66
  key = f"{owner}/{repo}"
67
  if key in cache:
68
  return cache[key]
69
+ md = await fetch_repo_readme_markdown(session, owner, repo)
70
  if md is not None:
71
  cache[key] = md
72
  save_cache(cache_file, cache)
 
137
  session: aiohttp.ClientSession,
138
  cache: Dict[str, Any],
139
  cache_file: Path,
 
140
  root_repo_url: str,
141
  max_depth: int,
142
  ) -> List[str]:
 
155
  while queue:
156
  owner, repo, depth = queue.popleft()
157
  print(f"[depth={depth}] awesome: {owner}/{repo}")
158
+ md = await fetch_readme_with_cache(session, cache, cache_file, owner, repo)
 
 
159
  if md is None:
160
  print(f" README not found for {owner}/{repo}", file=sys.stderr)
161
  continue
 
226
  # Load cache
227
  cache = load_cache(cache_file)
228
 
 
 
 
229
  # Run async crawl
230
  async def run():
231
  async with aiohttp.ClientSession() as session:
232
  finals = await crawl_awesome_final_repos(
233
+ session, cache, cache_file, args.root, args.depth
234
  )
235
  out_links = output_dir / "github_links.txt"
236
  out_links.write_text("\n".join(finals) + "\n", encoding="utf-8")
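With fetch_readme_async and get_aiohttp_headers removed, README fetching in this script now goes through the shared github_api_utils.fetch_repo_readme_markdown helper. Below is a minimal sketch of how the cached lookup composes with that helper; the standalone wrapper and the cache-file location are illustrative only, not part of the commit.

# Minimal sketch: cached README lookup delegating to the shared helper.
# Assumes it runs from data_collection_utils/ so github_api_utils is importable;
# the cache path below is illustrative only.
import asyncio
import json
from pathlib import Path
from typing import Optional

import aiohttp

from github_api_utils import fetch_repo_readme_markdown


async def cached_readme(owner: str, repo: str, cache_file: Path) -> Optional[str]:
    cache = json.loads(cache_file.read_text(encoding="utf-8")) if cache_file.exists() else {}
    key = f"{owner}/{repo}"
    if key in cache:
        return cache[key]
    async with aiohttp.ClientSession() as session:
        md = await fetch_repo_readme_markdown(session, owner, repo)
    if md is not None:
        cache[key] = md
        cache_file.write_text(json.dumps(cache, ensure_ascii=False, indent=2), encoding="utf-8")
    return md


if __name__ == "__main__":
    md = asyncio.run(cached_readme("sindresorhus", "awesome", Path("readme_cache.json")))
    print("README fetched:", md is not None)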
data_collection_utils/awesome_scrap_config.yaml ADDED
@@ -0,0 +1,18 @@
1
+ # Awesome Scraper Config
2
+ # Configuration for awesome_final_repos.py
3
+ # All settings can be overridden via CLI args.
4
+
5
+ # Root Awesome repository (full GitHub URL)
6
+ root: "https://github.com/sindresorhus/awesome"
7
+
8
+ # Maximum recursion depth for Awesome sublists
9
+ depth: 2
10
+
11
+ # Output directory for github_links.txt (relative to script dir)
12
+ output_dir: "."
13
+
14
+ # Cache directory for README content (relative to script dir)
15
+ cache_dir: "output/awesome_parse_cache"
16
+
17
+ # Number of concurrent workers for fetching
18
+ workers: 16
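The script is expected to read this file and let CLI flags override each key. The exact argparse wiring in awesome_final_repos.py is not shown in this diff, so the snippet below is only a sketch of the intended precedence (CLI over YAML over defaults); the flag names are assumed to mirror the YAML keys.

# Sketch only: merge awesome_scrap_config.yaml with CLI overrides.
# The real argument names and defaults live in awesome_final_repos.py and may differ.
import argparse

import yaml


def load_settings(config_path: str, argv=None) -> dict:
    with open(config_path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f) or {}
    parser = argparse.ArgumentParser(description="Crawl Awesome lists for final repos")
    parser.add_argument("--root", default=cfg.get("root"))
    parser.add_argument("--depth", type=int, default=cfg.get("depth", 2))
    parser.add_argument("--output-dir", default=cfg.get("output_dir", "."))
    parser.add_argument("--cache-dir", default=cfg.get("cache_dir", "output/awesome_parse_cache"))
    parser.add_argument("--workers", type=int, default=cfg.get("workers", 16))
    return vars(parser.parse_args(argv))


if __name__ == "__main__":
    print(load_settings("awesome_scrap_config.yaml", argv=[]))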
data_collection_utils/github_api_utils.py ADDED
@@ -0,0 +1,227 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ GitHub API utilities for scraping and metadata collection.
4
+ Separated from scrape_gh_docs.py to keep the main script slimmer.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import os
10
+ import time
11
+ import logging
12
+ import threading
13
+ from urllib.parse import quote_plus
14
+ from typing import Optional, Dict, Any, List
15
+
16
+ import requests
17
+ import aiohttp
18
+
19
+ GITHUB_API = "https://api.github.com"
20
+
21
+ # Use the same logger name as the main script so logs route through its handler
22
+ logger = logging.getLogger("scrape_gh_docs")
23
+
24
+ _thread_local = threading.local()
25
+
26
+
27
+ def github_headers() -> Dict[str, str]:
28
+ token = os.getenv("GITHUB_TOKEN")
29
+ h = {"Accept": "application/vnd.github.v3+json", "User-Agent": "docs-scraper/1.0"}
30
+ if token:
31
+ h["Authorization"] = f"token {token}"
32
+ return h
33
+
34
+
35
+ def get_session() -> requests.Session:
36
+ sess = getattr(_thread_local, "session", None)
37
+ if sess is None:
38
+ sess = requests.Session()
39
+ _thread_local.session = sess
40
+ return sess
41
+
42
+
43
+ def request_json(
44
+ url: str, params: Optional[dict] = None, accept_status=(200,), max_retries: int = 3
45
+ ):
46
+ for attempt in range(max_retries):
47
+ resp = get_session().get(
48
+ url, headers=github_headers(), params=params, timeout=30
49
+ )
50
+ if resp.status_code in accept_status:
51
+ # Some endpoints return empty responses on success (e.g. 204). Handle json errors defensively.
52
+ try:
53
+ return resp.json()
54
+ except Exception:
55
+ return None
56
+ if resp.status_code == 403:
57
+ # rate limit or blocked - try to get reset and sleep
58
+ reset = resp.headers.get("X-RateLimit-Reset")
59
+ ra = resp.headers.get("Retry-After")
60
+ if ra:
61
+ wait = int(ra)
62
+ elif reset:
63
+ wait = max(5, int(reset) - int(time.time()))
64
+ else:
65
+ wait = 30
66
+ logger.warning(
67
+ f"403 from {url}. Sleeping {wait}s (attempt {attempt + 1}/{max_retries})"
68
+ )
69
+ time.sleep(wait)
70
+ continue
71
+ if 500 <= resp.status_code < 600:
72
+ backoff = (attempt + 1) * 5
73
+ logger.warning(f"{resp.status_code} from {url}. Backing off {backoff}s")
74
+ time.sleep(backoff)
75
+ continue
76
+ logger.error(f"Request to {url} returned {resp.status_code}: {resp.text}")
77
+ return None
78
+ logger.error(f"Exhausted retries for {url}")
79
+ return None
80
+
81
+
82
+ def download_file(url: str, dest_path):
83
+ dest_path.parent.mkdir(parents=True, exist_ok=True)
84
+ with get_session().get(url, headers=github_headers(), stream=True, timeout=60) as r:
85
+ r.raise_for_status()
86
+ with open(dest_path, "wb") as f:
87
+ for chunk in r.iter_content(chunk_size=8192):
88
+ if chunk:
89
+ f.write(chunk)
90
+
91
+
92
+ # === High-level GitHub API helpers ===
93
+
94
+
95
+ def get_repo_info(owner: str, repo: str) -> Optional[Dict[str, Any]]:
96
+ url = f"{GITHUB_API}/repos/{owner}/{repo}"
97
+ return request_json(url)
98
+
99
+
100
+ def get_default_branch(
101
+ owner: str, repo: str, repo_json: Optional[Dict[str, Any]] = None
102
+ ) -> Optional[str]:
103
+ if repo_json and "default_branch" in repo_json:
104
+ return repo_json["default_branch"]
105
+ info = get_repo_info(owner, repo)
106
+ if not info:
107
+ return None
108
+ return info.get("default_branch")
109
+
110
+
111
+ def get_latest_commit_date(
112
+ owner: str,
113
+ repo: str,
114
+ ref: Optional[str],
115
+ repo_json: Optional[Dict[str, Any]] = None,
116
+ ) -> Optional[str]:
117
+ """
118
+ Return ISO8601 date string of the latest commit on the given ref (branch or SHA).
119
+ Falls back to repo's pushed_at if commits endpoint returns nothing.
120
+ """
121
+ branch = ref or (repo_json.get("default_branch") if repo_json else None) or "main"
122
+ commits = request_json(
123
+ f"{GITHUB_API}/repos/{owner}/{repo}/commits",
124
+ params={"sha": branch, "per_page": 1},
125
+ accept_status=(200,),
126
+ )
127
+ if isinstance(commits, list) and commits:
128
+ try:
129
+ return commits[0]["commit"]["author"]["date"]
130
+ except Exception:
131
+ pass
132
+ if repo_json is None:
133
+ repo_json = get_repo_info(owner, repo) or {}
134
+ return repo_json.get("pushed_at")
135
+
136
+
137
+ def get_contents(owner: str, repo: str, path: str, ref: Optional[str] = None):
138
+ url = f"{GITHUB_API}/repos/{owner}/{repo}/contents/{quote_plus(path)}"
139
+ params = {"ref": ref} if ref else None
140
+ return request_json(url, params=params, accept_status=(200, 404))
141
+
142
+
143
+ def get_owner_type(owner: str) -> Optional[str]:
144
+ info = request_json(f"{GITHUB_API}/users/{owner}", accept_status=(200, 404))
145
+ if not info:
146
+ return None
147
+ return info.get("type")
148
+
149
+
150
+ def get_org_repos(owner: str, per_page: int = 100) -> List[Dict[str, Any]]:
151
+ owner_type = get_owner_type(owner)
152
+ base = "orgs" if owner_type == "Organization" else "users"
153
+ repos: List[Dict[str, Any]] = []
154
+ page = 1
155
+ while True:
156
+ url = f"{GITHUB_API}/{base}/{owner}/repos"
157
+ params = {"per_page": per_page, "page": page}
158
+ data = request_json(url, params=params)
159
+ if not data:
160
+ if page == 1 and base == "orgs":
161
+ base = "users"
162
+ continue
163
+ break
164
+ repos.extend(data)
165
+ if len(data) < per_page:
166
+ break
167
+ page += 1
168
+ return repos
169
+
170
+
171
+ def search_repos(query: str, per_page: int = 5) -> List[Dict[str, Any]]:
172
+ url = f"{GITHUB_API}/search/repositories"
173
+ params = {"q": query, "per_page": per_page}
174
+ res = request_json(url, params=params, accept_status=(200,))
175
+ if not res:
176
+ return []
177
+ return res.get("items", [])
178
+
179
+
180
+ def get_repo_tree_paths(owner: str, repo: str, ref: Optional[str]) -> List[str]:
181
+ ref = ref or "main"
182
+ url = f"{GITHUB_API}/repos/{owner}/{repo}/git/trees/{quote_plus(ref)}"
183
+ params = {"recursive": 1}
184
+ data = request_json(url, params=params, accept_status=(200,))
185
+ if not data or "tree" not in data:
186
+ return []
187
+ paths: List[str] = []
188
+ for entry in data["tree"]:
189
+ if entry.get("type") == "blob" and "path" in entry:
190
+ paths.append(entry["path"])
191
+ return paths
192
+
193
+
194
+ def get_repo_tree_md_paths(owner: str, repo: str, ref: Optional[str]) -> List[str]:
195
+ """
196
+ Return only Markdown file paths from the repository tree on the given ref
197
+ using the Git Trees API (recursive=1).
198
+
199
+ This is a convenience wrapper over get_repo_tree_paths() that filters to
200
+ .md files, case-insensitive.
201
+ """
202
+ all_paths = get_repo_tree_paths(owner, repo, ref)
203
+ return [p for p in all_paths if p.lower().endswith(".md")]
204
+
205
+
206
+ async def fetch_repo_readme_markdown(
207
+ session: aiohttp.ClientSession, owner: str, repo: str
208
+ ) -> Optional[str]:
209
+ """
210
+ Fetch README markdown using the contents API, trying README.md and readme.md.
211
+ Returns the markdown text or None if not found.
212
+ """
213
+ headers = github_headers()
214
+ for name in ("README.md", "readme.md"):
215
+ url = f"{GITHUB_API}/repos/{owner}/{repo}/contents/{name}"
216
+ try:
217
+ async with session.get(url, headers=headers) as resp:
218
+ if resp.status == 200:
219
+ data = await resp.json()
220
+ if isinstance(data, dict) and "download_url" in data:
221
+ download_url = data["download_url"]
222
+ async with session.get(download_url, headers=headers) as d:
223
+ if d.status == 200:
224
+ return await d.text()
225
+ except Exception:
226
+ continue
227
+ return None
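The new module centralizes token handling, retry and rate-limit logic, and the higher-level repo helpers shared by both scrapers. A short usage sketch follows; GITHUB_TOKEN is read from the environment if present, and unauthenticated calls still work but hit GitHub's lower rate limit. The example repository is arbitrary.

# Usage sketch for github_api_utils; run from data_collection_utils/.
# Calls go to the real GitHub API, so setting GITHUB_TOKEN is recommended.
import asyncio

import aiohttp

from github_api_utils import (
    get_repo_info,
    get_default_branch,
    get_repo_tree_md_paths,
    fetch_repo_readme_markdown,
)


def describe_repo(owner: str, repo: str) -> None:
    info = get_repo_info(owner, repo) or {}
    branch = get_default_branch(owner, repo, info)
    md_paths = get_repo_tree_md_paths(owner, repo, branch)
    print(f"{owner}/{repo}: default branch={branch}, stars={info.get('stargazers_count')}, "
          f"{len(md_paths)} markdown files")


async def show_readme_size(owner: str, repo: str) -> None:
    async with aiohttp.ClientSession() as session:
        md = await fetch_repo_readme_markdown(session, owner, repo)
    print("README chars:", len(md) if md else None)


if __name__ == "__main__":
    describe_repo("pallets", "flask")
    asyncio.run(show_readme_size("pallets", "flask"))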
data_collection_utils/repo_tree.py ADDED
@@ -0,0 +1,76 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Repository tree utilities: build nested structures from local directories or flat path lists.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from pathlib import Path
9
+ from typing import Dict, Any, List
10
+
11
+
12
+ def _tree_node(name: str, node_type: str) -> Dict[str, Any]:
13
+ assert node_type in ("dir", "file")
14
+ if node_type == "dir":
15
+ return {"name": name, "type": node_type, "children": []}
16
+ return {"name": name, "type": node_type}
17
+
18
+
19
+ def _insert_path_into_tree(root: Dict[str, Any], parts: List[str]):
20
+ node = root
21
+ for i, part in enumerate(parts):
22
+ is_last = i == len(parts) - 1
23
+ if not is_last:
24
+ # find or create directory child
25
+ found = None
26
+ for ch in node["children"]:
27
+ if ch["type"] == "dir" and ch["name"] == part:
28
+ found = ch
29
+ break
30
+ if found is None:
31
+ found = _tree_node(part, "dir")
32
+ node["children"].append(found)
33
+ node = found
34
+ else:
35
+ # file leaf
36
+ node["children"].append(_tree_node(part, "file"))
37
+
38
+
39
+ def build_tree_from_local_dir(base: Path, only_md: bool) -> Dict[str, Any]:
40
+ root = _tree_node(base.name, "dir")
41
+ for p in base.rglob("*"):
42
+ if not p.is_file():
43
+ continue
44
+ if only_md and not p.name.lower().endswith(".md"):
45
+ continue
46
+ rel = p.relative_to(base)
47
+ parts = list(rel.parts)
48
+ if parts:
49
+ _insert_path_into_tree(root, parts)
50
+ return root
51
+
52
+
53
+ def build_tree_from_paths(paths: List[str], root_name: str = "repo") -> Dict[str, Any]:
54
+ root = _tree_node(root_name, "dir")
55
+ for path in paths:
56
+ parts = [p for p in Path(path).parts if p]
57
+ if parts:
58
+ _insert_path_into_tree(root, parts)
59
+ return root
60
+
61
+
62
+ def filter_paths_by_directories(paths: List[str], dir_names: List[str]) -> List[str]:
63
+ """
64
+ Keep only those paths that are under any of the given directory names.
65
+ A match occurs when a path starts with "<dir>/" or contains "/<dir>/".
66
+ """
67
+ if not dir_names:
68
+ return paths
69
+ name_set = set(dir_names)
70
+ out: List[str] = []
71
+ for p in paths:
72
+ for dn in name_set:
73
+ if p.startswith(f"{dn}/") or f"/{dn}/" in p:
74
+ out.append(p)
75
+ break
76
+ return out
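repo_tree.py builds the nested {"name", "type", "children"} structures that the metadata dataset later stores as JSON. A small self-contained example of the path-based builder and the directory filter; the paths and root name are made up for illustration.

# Example of the tree helpers added in this commit; run from data_collection_utils/.
import json

from repo_tree import build_tree_from_paths, filter_paths_by_directories

paths = [
    "README.md",
    "docs/index.md",
    "docs/api/reference.md",
    "src/pkg/module.py",
]

# Keep only paths under a docs/ directory -> ['docs/index.md', 'docs/api/reference.md']
docs_paths = filter_paths_by_directories(paths, ["docs"])

# Nested dict: {"name": "owner__repo", "type": "dir", "children": [{"name": "docs", ...}]}
tree = build_tree_from_paths(docs_paths, root_name="owner__repo")
print(json.dumps(tree, indent=2))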
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -30,19 +30,18 @@ Outputs:
30
 
31
  import os
32
  import sys
33
- import time
34
  import zipfile
35
  import shutil
36
  import re
37
  import argparse
38
- from urllib.parse import quote_plus, urlparse
39
  from pathlib import Path
40
- import requests
41
  from tqdm import tqdm
42
  import concurrent.futures
43
  import threading
44
  from typing import Optional
45
  from typing import Dict, Any, List
 
46
 
47
  import pandas as pd
48
  import subprocess
@@ -52,52 +51,29 @@ import logging
52
  import langid # https://github.com/saffsd/langid.py
53
 
54
 
55
- GITHUB_API = "https://api.github.com"
56
 
57
- # === Helpers ===
58
 
59
-
60
- def github_headers():
61
- token = os.getenv("GITHUB_TOKEN")
62
- h = {"Accept": "application/vnd.github.v3+json", "User-Agent": "docs-scraper/1.0"}
63
- if token:
64
- h["Authorization"] = f"token {token}"
65
- return h
66
 
67
 
68
- def request_json(url, params=None, accept_status=(200,), max_retries=3):
69
- for attempt in range(max_retries):
70
- resp = get_session().get(
71
- url, headers=github_headers(), params=params, timeout=30
72
- )
73
- if resp.status_code in accept_status:
74
- return resp.json()
75
- if resp.status_code == 403:
76
- # rate limit or blocked - try to get reset and sleep
77
- reset = resp.headers.get("X-RateLimit-Reset")
78
- ra = resp.headers.get("Retry-After")
79
- if ra:
80
- wait = int(ra)
81
- elif reset:
82
- wait = max(5, int(reset) - int(time.time()))
83
- else:
84
- wait = 30
85
- logger.warning(
86
- f"403 from {url}. Sleeping {wait}s (attempt {attempt + 1}/{max_retries})"
87
- )
88
- time.sleep(wait)
89
- continue
90
- if 500 <= resp.status_code < 600:
91
- # server error - backoff
92
- backoff = (attempt + 1) * 5
93
- logger.warning(f"{resp.status_code} from {url}. Backing off {backoff}s")
94
- time.sleep(backoff)
95
- continue
96
- # other errors - return None
97
- logger.error(f"Request to {url} returned {resp.status_code}: {resp.text}")
98
- return None
99
- logger.error(f"Exhausted retries for {url}")
100
- return None
101
 
102
 
103
  def write_text(path, data):
@@ -105,15 +81,7 @@ def write_text(path, data):
105
  path.write_text(data, encoding="utf-8")
106
 
107
 
108
- thread_local = threading.local()
109
-
110
-
111
- def get_session() -> requests.Session:
112
- sess = getattr(thread_local, "session", None)
113
- if sess is None:
114
- sess = requests.Session()
115
- thread_local.session = sess
116
- return sess
117
 
118
 
119
  def ensure_github_token(token_file: Optional[str]) -> str:
@@ -156,14 +124,7 @@ def append_line_threadsafe(
156
  lock.release()
157
 
158
 
159
- def download_file(url, dest_path):
160
- dest_path.parent.mkdir(parents=True, exist_ok=True)
161
- with get_session().get(url, headers=github_headers(), stream=True, timeout=60) as r:
162
- r.raise_for_status()
163
- with open(dest_path, "wb") as f:
164
- for chunk in r.iter_content(chunk_size=8192):
165
- if chunk:
166
- f.write(chunk)
167
 
168
 
169
  def count_md_files(root_dir: Path):
@@ -278,68 +239,22 @@ def compute_md_failed_for_existing(outdir: Path, md_failed_path: Path):
278
  # === Main logic for a single repo ===
279
 
280
 
281
- def get_repo_info(owner, repo):
282
- url = f"{GITHUB_API}/repos/{owner}/{repo}"
283
- return request_json(url)
284
 
285
 
286
- def get_default_branch(owner, repo, repo_json=None):
287
- if repo_json and "default_branch" in repo_json:
288
- return repo_json["default_branch"]
289
- info = get_repo_info(owner, repo)
290
- if not info:
291
- return None
292
- return info.get("default_branch")
293
 
294
 
295
- def get_contents(owner, repo, path, ref=None):
296
- # GET /repos/{owner}/{repo}/contents/{path}
297
- url = f"{GITHUB_API}/repos/{owner}/{repo}/contents/{quote_plus(path)}"
298
- params = {"ref": ref} if ref else None
299
- return request_json(url, params=params, accept_status=(200, 404))
300
 
301
 
302
- def get_owner_type(owner: str) -> Optional[str]:
303
- """Return 'Organization' or 'User' (or None if unknown)."""
304
- info = request_json(f"{GITHUB_API}/users/{owner}", accept_status=(200, 404))
305
- if not info:
306
- return None
307
- return info.get("type")
308
 
309
 
310
- def get_org_repos(owner, per_page=100):
311
- """List repositories for the owner whether it's an organization or a user.
312
 
313
- Tries to detect owner type via /users/{owner}. Falls back appropriately.
314
- """
315
- owner_type = get_owner_type(owner)
316
- base = "orgs" if owner_type == "Organization" else "users"
317
- repos = []
318
- page = 1
319
- while True:
320
- url = f"{GITHUB_API}/{base}/{owner}/repos"
321
- params = {"per_page": per_page, "page": page}
322
- data = request_json(url, params=params)
323
- if not data:
324
- # If org endpoint 404'd earlier and we misdetected, try the other once
325
- if page == 1 and base == "orgs":
326
- base = "users"
327
- continue
328
- break
329
- repos.extend(data)
330
- if len(data) < per_page:
331
- break
332
- page += 1
333
- return repos
334
-
335
-
336
- def search_repos(query, per_page=5):
337
- url = f"{GITHUB_API}/search/repositories"
338
- params = {"q": query, "per_page": per_page}
339
- res = request_json(url, params=params, accept_status=(200,))
340
- if not res:
341
- return []
342
- return res.get("items", [])
343
 
344
 
345
  def download_docs_folder(
@@ -450,6 +365,9 @@ def download_repo_zip(owner, repo, ref, outdir, only_md: bool = False):
450
  return extract_to
451
 
452
 
 
 
 
453
  def sparse_checkout_docs(
454
  owner: str,
455
  repo: str,
@@ -643,6 +561,7 @@ def process_repo_entry(
643
  "md_count": None,
644
  "status": "ok",
645
  "note": None,
 
646
  }
647
  got_any = False
648
  default_branch = None
@@ -725,6 +644,9 @@ def process_repo_entry(
725
  default_branch = branch_guess
726
  result["default_branch"] = branch_guess
727
  result["method"] = "sparse_docs"
 
 
 
728
  got_any = True
729
  break
730
  except FileNotFoundError:
@@ -753,6 +675,7 @@ def process_repo_entry(
753
  default_branch = branch_guess
754
  result["default_branch"] = branch_guess
755
  result["method"] = "zip_whole_repo"
 
756
  got_any = True
757
  break
758
  except Exception as e:
@@ -810,6 +733,9 @@ def process_repo_entry(
810
  )
811
  got_any = True
812
  result["method"] = "docs_folder_in_repo"
 
 
 
813
  elif isinstance(contents, dict) and contents.get("type") == "file":
814
  logger.info(f"Found file at docs (single-file). Downloading...")
815
  if not dry_run:
@@ -818,6 +744,9 @@ def process_repo_entry(
818
  )
819
  got_any = True
820
  result["method"] = "docs_file_in_repo"
 
 
 
821
  else:
822
  # contents returned but unknown structure: skip to next steps
823
  pass
@@ -840,6 +769,7 @@ def process_repo_entry(
840
  )
841
  got_any = True
842
  result["method"] = "org_docs_repo_zip"
 
843
  else:
844
  # Step 3 fallback: use search API for repos in owner scope with '{repo} docs' in name
845
  owner_type = get_owner_type(owner)
@@ -861,6 +791,10 @@ def process_repo_entry(
861
  )
862
  got_any = True
863
  result["method"] = "search_repo_zip"
 
 
 
 
864
 
865
  if not got_any:
866
  logger.warning(
@@ -915,7 +849,7 @@ def main():
915
  parser.add_argument(
916
  "--no-fetch",
917
  action="store_true",
918
- help="Do not perform any network downloads; scan existing outdir to build Parquet and md-failed",
919
  )
920
  args = parser.parse_args()
921
 
@@ -941,6 +875,7 @@ def main():
941
  dry_run_value = bool(cfg.get("dry_run", False))
942
  workers_value = int(cfg.get("workers", 4))
943
  texts_parquet_value = _resolve_cfg_path(cfg.get("texts_parquet"))
 
944
  token_file_value = _resolve_cfg_path(cfg.get("token_file"))
945
  prefer_zip_value = bool(cfg.get("prefer_zip", False))
946
  prefer_sparse_value = bool(cfg.get("prefer_sparse", False))
@@ -1078,6 +1013,177 @@ def main():
1078
  except Exception as e:
1079
  logger.error(f"Failed to write per-file Parquet to {texts_parquet_path}: {e}")
1080
 
1081
  logger.info("Done. Check output directory and md-failed.txt")
1082
 
1083
 
 
30
 
31
  import os
32
  import sys
 
33
  import zipfile
34
  import shutil
35
  import re
36
  import argparse
37
+ from urllib.parse import urlparse
38
  from pathlib import Path
 
39
  from tqdm import tqdm
40
  import concurrent.futures
41
  import threading
42
  from typing import Optional
43
  from typing import Dict, Any, List
44
+ import json
45
 
46
  import pandas as pd
47
  import subprocess
 
51
  import langid # https://github.com/saffsd/langid.py
52
 
53
 
54
+ from github_api_utils import (
55
+ download_file,
56
+ get_repo_info,
57
+ get_latest_commit_date,
58
+ get_contents,
59
+ get_owner_type,
60
+ get_org_repos,
61
+ search_repos,
62
+ get_repo_tree_paths,
63
+ get_repo_tree_md_paths,
64
+ )
65
+ from repo_tree import (
66
+ build_tree_from_local_dir,
67
+ build_tree_from_paths,
68
+ filter_paths_by_directories,
69
+ )
70
 
71
+ # Note: core GitHub helpers and repo tree builders are defined in the modules above
72
 
73
+ # === Helpers (local only) ===
74
 
75
 
76
+ # moved: request_json -> github_api_utils.request_json
77
 
78
 
79
  def write_text(path, data):
 
81
  path.write_text(data, encoding="utf-8")
82
 
83
 
84
+ # moved: session management -> github_api_utils.get_session
85
 
86
 
87
  def ensure_github_token(token_file: Optional[str]) -> str:
 
124
  lock.release()
125
 
126
 
127
+ # moved: download_file -> github_api_utils.download_file
128
 
129
 
130
  def count_md_files(root_dir: Path):
 
239
  # === Main logic for a single repo ===
240
 
241
 
242
+ # moved: get_repo_info -> github_api_utils.get_repo_info
 
 
243
 
244
 
245
+ # moved: get_default_branch -> github_api_utils.get_default_branch
246
 
247
 
248
+ # moved: get_contents -> github_api_utils.get_contents
249
 
250
 
251
+ # moved: get_owner_type -> github_api_utils.get_owner_type
252
 
253
 
254
+ # moved: get_org_repos -> github_api_utils.get_org_repos
 
255
 
256
+
257
+ # moved: search_repos -> github_api_utils.search_repos
258
 
259
 
260
  def download_docs_folder(
 
365
  return extract_to
366
 
367
 
368
+ # === Repo structure helpers moved to data_collection_utils.repo_tree ===
369
+
370
+
371
  def sparse_checkout_docs(
372
  owner: str,
373
  repo: str,
 
561
  "md_count": None,
562
  "status": "ok",
563
  "note": None,
564
+ "docs_found_in": None,
565
  }
566
  got_any = False
567
  default_branch = None
 
644
  default_branch = branch_guess
645
  result["default_branch"] = branch_guess
646
  result["method"] = "sparse_docs"
647
+ result["docs_found_in"] = (
648
+ f"https://github.com/{owner}/{repo}/tree/{branch_guess}/docs"
649
+ )
650
  got_any = True
651
  break
652
  except FileNotFoundError:
 
675
  default_branch = branch_guess
676
  result["default_branch"] = branch_guess
677
  result["method"] = "zip_whole_repo"
678
+ result["docs_found_in"] = f"https://github.com/{owner}/{repo}"
679
  got_any = True
680
  break
681
  except Exception as e:
 
733
  )
734
  got_any = True
735
  result["method"] = "docs_folder_in_repo"
736
+ result["docs_found_in"] = (
737
+ f"https://github.com/{owner}/{repo}/tree/{default_branch}/{docs_path}"
738
+ )
739
  elif isinstance(contents, dict) and contents.get("type") == "file":
740
  logger.info(f"Found file at docs (single-file). Downloading...")
741
  if not dry_run:
 
744
  )
745
  got_any = True
746
  result["method"] = "docs_file_in_repo"
747
+ result["docs_found_in"] = (
748
+ f"https://github.com/{owner}/{repo}/blob/{default_branch}/{docs_path}"
749
+ )
750
  else:
751
  # contents returned but unknown structure: skip to next steps
752
  pass
 
769
  )
770
  got_any = True
771
  result["method"] = "org_docs_repo_zip"
772
+ result["docs_found_in"] = f"https://github.com/{owner}/{cand_name}"
773
  else:
774
  # Step 3 fallback: use search API for repos in owner scope with '{repo} docs' in name
775
  owner_type = get_owner_type(owner)
 
791
  )
792
  got_any = True
793
  result["method"] = "search_repo_zip"
794
+ result["docs_found_in"] = first.get(
795
+ "html_url",
796
+ f"https://github.com/{first['owner']['login']}/{first['name']}",
797
+ )
798
 
799
  if not got_any:
800
  logger.warning(
 
849
  parser.add_argument(
850
  "--no-fetch",
851
  action="store_true",
852
+ help="Do not perform any network downloads; only scan existing outdir to build Parquet and md-failed",
853
  )
854
  args = parser.parse_args()
855
 
 
875
  dry_run_value = bool(cfg.get("dry_run", False))
876
  workers_value = int(cfg.get("workers", 4))
877
  texts_parquet_value = _resolve_cfg_path(cfg.get("texts_parquet"))
878
+ repometa_parquet_value = _resolve_cfg_path(cfg.get("repometa_parquet"))
879
  token_file_value = _resolve_cfg_path(cfg.get("token_file"))
880
  prefer_zip_value = bool(cfg.get("prefer_zip", False))
881
  prefer_sparse_value = bool(cfg.get("prefer_sparse", False))
 
1013
  except Exception as e:
1014
  logger.error(f"Failed to write per-file Parquet to {texts_parquet_path}: {e}")
1015
 
1016
+ # Build and save repo metadata dataset
1017
+ repometa_rows: List[Dict[str, Any]] = []
1018
+ if no_fetch_value:
1019
+ # Derive metadata purely from local folders (no network calls)
1020
+ for d in repo_dirs:
1021
+ try:
1022
+ owner, repo = d.name.split("__", 1)
1023
+ except ValueError:
1024
+ continue
1025
+ link = f"https://github.com/{owner}/{repo}"
1026
+ # Determine docs folder similar to compute_md_failed_for_existing()
1027
+ if (d / "docs").exists():
1028
+ docs_folder = d / "docs"
1029
+ else:
1030
+ found = None
1031
+ for p in d.rglob("docs"):
1032
+ if p.is_dir():
1033
+ found = p
1034
+ break
1035
+ docs_folder = found if found else d
1036
+
1037
+ docs_tree_json = None
1038
+ if docs_folder.exists() and docs_folder.is_dir():
1039
+ docs_tree = build_tree_from_local_dir(docs_folder, only_md=True)
1040
+ docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
1041
+
1042
+ full_tree = build_tree_from_local_dir(d, only_md=False)
1043
+ full_tree_json = json.dumps(full_tree, ensure_ascii=False)
1044
+
1045
+ repometa_rows.append(
1046
+ {
1047
+ "latest_commit_date": None, # unknown without network
1048
+ "name": repo,
1049
+ "parent_org": owner,
1050
+ "stars": None, # unknown without network
1051
+ "link": link,
1052
+ "docs_found_in": None,
1053
+ "docs_repo_structure": docs_tree_json,
1054
+ "repo_structure_all_files": full_tree_json,
1055
+ }
1056
+ )
1057
+ else:
1058
+ for res in results:
1059
+ owner = res.get("owner")
1060
+ repo = res.get("repo")
1061
+ if not owner or not repo:
1062
+ continue
1063
+ try:
1064
+ repo_json = get_repo_info(owner, repo) or {}
1065
+ default_branch = res.get("default_branch") or repo_json.get(
1066
+ "default_branch", "main"
1067
+ )
1068
+ # latest commit date via utility (fallback to pushed_at handled inside)
1069
+ latest_commit_date = get_latest_commit_date(
1070
+ owner, repo, default_branch, repo_json
1071
+ )
1072
+
1073
+ stars = repo_json.get("stargazers_count")
1074
+ link = f"https://github.com/{owner}/{repo}"
1075
+ docs_found_in = res.get("docs_found_in")
1076
+
1077
+ # docs repo structure (only .md) via Git Trees API; scope it to the actual container that held the docs
1078
+ # Examples:
1079
+ # - docker: use repo 'docker/docs' (entire repo)
1080
+ # - flutter: use 'flutter/flutter' restricted to the 'docs/' folder
1081
+ method = res.get("method")
1082
+ docs_tree_json = None
1083
+ try:
1084
+ docs_src_owner = owner
1085
+ docs_src_repo = repo
1086
+ docs_src_ref = default_branch
1087
+ path_filters: List[str] | None = None # prefix filters
1088
+
1089
+ if method in ("org_docs_repo_zip", "search_repo_zip"):
1090
+ # docs are in a separate repository; parse from docs_found_in URL
1091
+ dfi = res.get("docs_found_in")
1092
+ if isinstance(dfi, str) and dfi.startswith("http"):
1093
+ u = urlparse(dfi)
1094
+ parts = [p for p in u.path.split("/") if p]
1095
+ if len(parts) >= 2:
1096
+ docs_src_owner, docs_src_repo = parts[0], parts[1]
1097
+ info = (
1098
+ get_repo_info(docs_src_owner, docs_src_repo) or {}
1099
+ )
1100
+ docs_src_ref = info.get("default_branch", "main")
1101
+ # else: fallback to original owner/repo
1102
+ # For full separate docs repos, include all .md files (no path filter)
1103
+ path_filters = None
1104
+ elif method in ("docs_folder_in_repo", "docs_file_in_repo"):
1105
+ # Restrict to top-level docs/ folder
1106
+ path_filters = ["docs"]
1107
+ elif method in ("sparse_docs", "zip_whole_repo"):
1108
+ # Include common doc directory names anywhere in the tree
1109
+ path_filters = ["docs", "doc", "documentation"]
1110
+
1111
+ md_paths_all = get_repo_tree_md_paths(
1112
+ docs_src_owner, docs_src_repo, docs_src_ref
1113
+ )
1114
+ if path_filters is not None:
1115
+ md_paths = filter_paths_by_directories(
1116
+ md_paths_all, path_filters
1117
+ )
1118
+ else:
1119
+ md_paths = md_paths_all
1120
+
1121
+ if md_paths:
1122
+ root_name = f"{docs_src_owner}__{docs_src_repo}"
1123
+ docs_tree = build_tree_from_paths(md_paths, root_name=root_name)
1124
+ docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
1125
+ except Exception:
1126
+ pass
1127
+ if docs_tree_json is None:
1128
+ # Fallback to local docs dir if available
1129
+ docs_folder_rel = res.get("docs_folder")
1130
+ if docs_folder_rel:
1131
+ docs_dir = outdir / docs_folder_rel
1132
+ if docs_dir.exists() and docs_dir.is_dir():
1133
+ docs_tree = build_tree_from_local_dir(
1134
+ docs_dir, only_md=True
1135
+ )
1136
+ docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
1137
+
1138
+ # full repo structure via GitHub Tree API; fallback to local saved root if API fails
1139
+ full_tree_json = None
1140
+ try:
1141
+ paths = get_repo_tree_paths(owner, repo, default_branch)
1142
+ if paths:
1143
+ full_tree = build_tree_from_paths(
1144
+ paths, root_name=f"{owner}__{repo}"
1145
+ )
1146
+ full_tree_json = json.dumps(full_tree, ensure_ascii=False)
1147
+ except Exception:
1148
+ pass
1149
+ if full_tree_json is None:
1150
+ saved_root = outdir / safe_name(f"{owner}__{repo}")
1151
+ if saved_root.exists():
1152
+ full_tree = build_tree_from_local_dir(saved_root, only_md=False)
1153
+ full_tree_json = json.dumps(full_tree, ensure_ascii=False)
1154
+
1155
+ repometa_rows.append(
1156
+ {
1157
+ "latest_commit_date": latest_commit_date,
1158
+ "name": repo,
1159
+ "parent_org": owner,
1160
+ "stars": stars,
1161
+ "link": link,
1162
+ "docs_found_in": docs_found_in,
1163
+ "docs_repo_structure": docs_tree_json,
1164
+ "repo_structure_all_files": full_tree_json,
1165
+ }
1166
+ )
1167
+ except Exception as e:
1168
+ logger.warning(f"Failed to build repometa for {owner}/{repo}: {e}")
1169
+
1170
+ repometa_parquet_path = (
1171
+ Path(repometa_parquet_value)
1172
+ if repometa_parquet_value
1173
+ else (outdir / "repometa.parquet")
1174
+ )
1175
+ try:
1176
+ df_meta = pd.DataFrame(repometa_rows)
1177
+ repometa_parquet_path.parent.mkdir(parents=True, exist_ok=True)
1178
+ df_meta.to_parquet(repometa_parquet_path, index=False)
1179
+ logger.info(
1180
+ f"Wrote repo metadata dataset to {repometa_parquet_path} (rows={len(repometa_rows)})"
1181
+ )
1182
+ except Exception as e:
1183
+ logger.error(
1184
+ f"Failed to write repo metadata Parquet to {repometa_parquet_path}: {e}"
1185
+ )
1186
+
1187
  logger.info("Done. Check output directory and md-failed.txt")
1188
 
1189
 
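The new repometa dataset written above can be inspected directly with pandas; the two *_structure columns hold the JSON-serialized trees built by repo_tree.py. A quick look, assuming the default output location (outdir/repometa.parquet, with outdir taken here to be output/); adjust the path to your own config.

# Sketch: inspect the repo metadata Parquet written by scrape_gh_docs.py.
# "output/repometa.parquet" assumes the default outdir from this commit.
import json

import pandas as pd

df = pd.read_parquet("output/repometa.parquet")
print(df[["parent_org", "name", "stars", "latest_commit_date", "docs_found_in"]].head())

# docs_repo_structure / repo_structure_all_files are JSON strings of nested trees
first = df["docs_repo_structure"].dropna()
if not first.empty:
    tree = json.loads(first.iloc[0])
    print(tree["name"], "->", len(tree.get("children", [])), "top-level entries")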
data_collection_utils/top_1000_repos.py CHANGED
@@ -115,7 +115,9 @@ def main() -> None:
115
  f.write("\n".join(repo_links) + "\n")
116
 
117
  print(f"Wrote HTML to {out_html}")
118
- print(f"Found {len(repo_links)} original GitHub repositories and saved to {out_links}")
 
 
119
 
120
  context.close()
121
  browser.close()
 
115
  f.write("\n".join(repo_links) + "\n")
116
 
117
  print(f"Wrote HTML to {out_html}")
118
+ print(
119
+ f"Found {len(repo_links)} original GitHub repositories and saved to {out_links}"
120
+ )
121
 
122
  context.close()
123
  browser.close()