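"""Scrape the top1000repos.com listing with Playwright, extract canonical GitHub
repository links, optionally resolve forks to their parent repositories via the
GitHub GraphQL API, enrich each repo with metadata, and write the results to a
parquet file plus a plain-text list of links."""
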
from pathlib import Path
from playwright.sync_api import sync_playwright
from urllib.parse import urlparse
from typing import List, Optional, Dict, Any
import yaml
import pandas as pd
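# Local project helper that batches repository metadata lookups against the GitHub GraphQL API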
from github_api_utils import fetch_repos_metadata_graphql
from dotenv import load_dotenv

URL = "https://top1000repos.com/"


def canonical_repo_url(url: str) -> Optional[str]:
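    """Return the canonical ``https://github.com/{owner}/{repo}`` URL, or None.

    Example (illustrative URL):
        canonical_repo_url("https://github.com/psf/requests/issues/123")
        -> "https://github.com/psf/requests"
    """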
    p = urlparse(url)
    if p.scheme != "https" or p.netloc != "github.com":
        return None
    parts = [part for part in p.path.split("/") if part]
    if len(parts) < 2:
        return None
    owner, repo = parts[0], parts[1]
    # Exclude non-repo top-level paths like topics, orgs, marketplace, etc.
    blocked_owners = {
        "topics",
        "collections",
        "orgs",
        "marketplace",
        "features",
        "pricing",
        "about",
        "site",
        "blog",
        "events",
        "apps",
        "sponsors",
        "login",
        "join",
        "explore",
        "contact",
        "settings",
        "search",
        "codespaces",
    }
    if owner in blocked_owners:
        return None
    return f"https://github.com/{owner}/{repo}"


def normalize_github_repo_links(links: List[str]) -> List[str]:
    repos = set()
    for u in links:
        cu = canonical_repo_url(u)
        if cu is not None:
            repos.add(cu)
    return sorted(repos)


# removed: resolve_to_original_repo (replaced by GraphQL-based mapping)


def parse_owner_repo(url: str) -> Optional[tuple[str, str]]:
    p = urlparse(url)
    parts = [part for part in p.path.split("/") if part]
    if len(parts) < 2:
        return None
    return parts[0], parts[1]


def map_to_original_repos_graphql(
    urls: List[str], *, batch_size: int = 30, topics_limit: int = 0
) -> List[str]:
    """Resolve forks to their parent repos using batched GraphQL metadata requests.

    - For each input repo URL, if it's a fork and parent_url is available, map to parent.
    - Returns the sorted unique list of canonical GitHub URLs.
    - Queries are sent in chunks of ``batch_size`` to avoid oversized GraphQL payloads.
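    - Example (hypothetical owners): a fork at https://github.com/some-user/project
      whose parent is https://github.com/upstream-org/project is mapped to the parent
      URL; non-fork inputs pass through unchanged.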
    """
    pairs: List[tuple[str, str]] = []
    for u in urls:
        pr = parse_owner_repo(u)
        if pr is None:
            continue
        pairs.append(pr)
    # Batch query to avoid 502s on oversized GraphQL requests
    meta: Dict[str, Dict[str, Any]] = {}
    for i in range(0, len(pairs), batch_size):
        chunk = pairs[i : i + batch_size]
        mm = fetch_repos_metadata_graphql(chunk, topics_limit=topics_limit)
        if mm:
            meta.update(mm)
    out: set[str] = set()
    for owner, repo in pairs:
        key = f"{owner}/{repo}"
        m = meta.get(key) or {}
        parent_url = m.get("parent_url")
        if m.get("is_fork") and isinstance(parent_url, str):
            cu = canonical_repo_url(parent_url)
            if cu is not None:
                out.add(cu)
                continue
        out.add(f"https://github.com/{owner}/{repo}")
    return sorted(out)


def main() -> None:
    # Load token from .env for GraphQL
    load_dotenv()
    print("[top_1000_repos] Starting script execution. This may take a minute or so...")

    # Load YAML config next to this script if present
    cfg: Dict[str, Any] = {}
    cfg_path = Path(__file__).with_name("top_1000_repos_config.yaml")
    if cfg_path.exists():
        cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}

    def _resolve_cfg_path(val: Optional[str]) -> Optional[Path]:
        if val is None:
            return None
        p = Path(val)
        if not p.is_absolute():
            p = (cfg_path.parent / p).resolve()
        return p

    project_root = Path(__file__).resolve().parents[1]
    out_html = _resolve_cfg_path(cfg.get("out_html")) or Path(__file__).with_name(
        "Top 1000 GitHub repositories, updated daily, all on one page..html"
    )
    out_links = _resolve_cfg_path(cfg.get("out_links")) or (
        project_root / "github_links.txt"
    )
    out_parquet = _resolve_cfg_path(cfg.get("out_parquet")) or Path(__file__).with_name(
        "top-1000-repos.parquet"
    )

    headless = bool(cfg.get("headless", True))
    # Scrolling config
    scroll_max_iters = int(cfg.get("scroll_max_iters", 200))
    scroll_pause_ms = int(cfg.get("scroll_pause_ms", 300))
    stable_threshold = int(cfg.get("stable_threshold", 10))
    min_anchors = int(cfg.get("min_anchors", 1500))
    # GraphQL config
    graphql_batch_size = int(cfg.get("graphql_batch_size", 30))
    topics_limit = int(cfg.get("topics_limit", 20))
    fork_resolution = bool(cfg.get("fork_resolution", True))
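    # Example top_1000_repos_config.yaml (all keys optional; file paths are
    # illustrative and resolved relative to this script's directory):
    #   out_html: rendered_top1000.html
    #   out_links: ../github_links.txt
    #   out_parquet: top-1000-repos.parquet
    #   headless: true
    #   scroll_max_iters: 200
    #   scroll_pause_ms: 300
    #   stable_threshold: 10
    #   min_anchors: 1500
    #   graphql_batch_size: 30
    #   topics_limit: 20
    #   fork_resolution: true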

    print("[top_1000_repos] Launching Playwright browser...")
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=headless)
        context = browser.new_context()
        page = context.new_page()
        page.goto(URL, wait_until="networkidle")
        # Wait until at least one GitHub link is present in the DOM
        page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)

        # Auto-scroll to force lazy loading/virtualized list to render all items
        def _scroll_all(
            max_iters: int = scroll_max_iters, pause_ms: int = scroll_pause_ms
        ) -> None:
            prev_count = 0
            stable = 0
            for _ in range(max_iters):
                page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                page.wait_for_timeout(pause_ms)
                count = page.eval_on_selector_all(
                    'a[href^="https://github.com/"]', "els => els.length"
                )
                if count <= prev_count:
                    stable += 1
                else:
                    stable = 0
                    prev_count = count
                # Stop after several iterations without growth or when clearly above 1000 anchors
                if stable >= stable_threshold or prev_count >= min_anchors:
                    break

        _scroll_all()
        print("[top_1000_repos] Scrolling completed. Extracting links...")

        # Save rendered HTML
        html = page.content()
        out_html.write_text(html, encoding="utf-8")
        print(f"[top_1000_repos] Saved rendered HTML to {out_html}")

        # Extract canonical GitHub repo URLs from the DOM after full scroll
        links = page.eval_on_selector_all(
            'a[href*="https://github.com/"]',
            "els => Array.from(new Set(els.map(e => e.href))).sort()",
        )
        repo_links = normalize_github_repo_links(links)

        # Optionally map any fork links to their original repositories and deduplicate
        if fork_resolution:
            repo_links = map_to_original_repos_graphql(
                repo_links, batch_size=graphql_batch_size, topics_limit=0
            )

        # Persist github_links.txt for visibility/debug (even if not used downstream)
        print(
            f"[top_1000_repos] Writing {len(repo_links)} repository links to {out_links}"
        )
        with out_links.open("w", encoding="utf-8") as f:
            f.write("\n".join(repo_links) + "\n")

        # Enrich via GraphQL in batches
        print(
            f"[top_1000_repos] Enriching repository metadata via GraphQL (batch size {graphql_batch_size})..."
        )
        pairs: List[tuple[str, str]] = []
        for u in repo_links:
            pr = parse_owner_repo(u)
            if pr is not None:
                pairs.append(pr)
        meta_map: Dict[str, Dict[str, Any]] = {}
        batch_size = graphql_batch_size
        for i in range(0, len(pairs), batch_size):
            chunk = pairs[i : i + batch_size]
            try:
                mm = fetch_repos_metadata_graphql(chunk, topics_limit=topics_limit)
            except Exception:
                mm = {}
            if mm:
                meta_map.update(mm)

        rows: List[Dict[str, Any]] = []
        for owner, repo in pairs:
            m = meta_map.get(f"{owner}/{repo}") or {}
            name = m.get("name") or repo
            desc = m.get("description")
            stars = m.get("stars")
            language = m.get("language")
            topics = m.get("topics")
            rows.append(
                {
                    "name": name,
                    "link": f"https://github.com/{owner}/{repo}",
                    "description": desc if isinstance(desc, str) else None,
                    "stars": stars if isinstance(stars, int) else None,
                    "language": language if isinstance(language, str) else None,
                    "topics": topics if isinstance(topics, list) else [],
                }
            )

        df = pd.DataFrame(rows)
        df.to_parquet(out_parquet, index=False)
        print(f"Wrote HTML to {out_html}")
        print(
            f"Saved {len(df)} repos to {out_parquet} and links ({len(repo_links)}) to {out_links}"
        )

        context.close()
        browser.close()


if __name__ == "__main__":
    main()