MRiabov committed
Commit 33b9fd0 · 1 Parent(s): 857c602

TEI embedding generation for filtering.

.gitignore CHANGED
@@ -175,3 +175,4 @@ poetry.toml
175
 
176
  # LSP config files
177
  pyrightconfig.json
178
+ data_collection_utils/Top 1000 GitHub repositories, updated daily, all on one page..html
data_collection_utils/embed_repo_descriptions.py ADDED
@@ -0,0 +1,352 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Embed GitHub repo short descriptions in batches using a Hugging Face Text Embeddings Inference (TEI)
4
+ server with the Qwen/Qwen3-Embedding-0.6B model, and persist results to Parquet.
5
+
6
+ Configuration (YAML): place a config file next to this script named
7
+ embed_repo_descriptions_config.yaml
8
+
9
+ Example config:
10
+ tei_url: http://127.0.0.1:8080
11
+ batch_size: 256
12
+ timeout: 120
13
+ output: ../repo_description_embeddings.parquet
14
+ # Optional: prepend a single custom instruction to each text for models like Qwen3-Embedding
15
+ # The instruction is applied only to the embedding input as:
16
+ # "{instruction}\nQuery: repo name: {name}\n" \
17
+ # "description: {text}"
18
+ # and the original description is preserved in the Parquet output.
19
+ custom_instruction: "Given a web search query, retrieve relevant passages that answer the query"
20
+ # Optional custom header for providers like SaladCloud
21
+ custom_header: Salad-Api-Key
22
+ custom_header_env: API_KEY # value will be read from this env var
23
+ data_sources:
24
+ - path: data_collection_utils/awesome-repos.parquet
25
+ source: awesome
26
+ - path: data_collection_utils/top-1000-repos.parquet
27
+ source: top1000
28
+
29
+ Notes:
30
+ - Secrets (e.g., HF_API_TOKEN) should be set in a .env file in the project root or env.
31
+ - Paths in the YAML are resolved relative to the YAML file location.
32
+
33
+ Output schema (Parquet):
34
+ - link: string
35
+ - name: string
36
+ - description: string
37
+ - source: string ('awesome' or 'top1000')
38
+ - dim: int32 (embedding dimension)
39
+ - embedding: list<float32>
40
+
41
+ Notes:
42
+ - This script calls the TEI embeddings route: POST {tei_url}/embed
43
+ via huggingface_hub.InferenceClient.feature_extraction. The model is whatever TEI is serving.
44
+ - If your TEI requires auth, set the env var named by hf_token_env (default HF_API_TOKEN); it is sent as a Bearer token.
45
+ - Batches are streamed and written incrementally to keep memory bounded.
46
+ - If `custom_instruction` is set, the embedding input becomes:
47
+ "{custom_instruction}\nQuery: repo name: {repo_name}\ndescription: {original_text}"
48
+ Only the embedding input is modified; the Parquet `description` column remains the original text.
49
+ """
50
+
51
+ from __future__ import annotations
52
+
53
+ import os
54
+ import argparse
55
+ from pathlib import Path
56
+ from typing import List, Any
57
+
58
+ import pandas as pd
59
+ import pyarrow as pa
60
+ import pyarrow.parquet as pq
61
+ from tqdm import tqdm
62
+ import yaml
63
+ from dotenv import load_dotenv
64
+ from huggingface_hub import InferenceClient
65
+ import numpy as np
66
+ from concurrent.futures import ThreadPoolExecutor, as_completed
67
+ import requests
68
+
69
+
70
+ def _read_parquet_safe(path: Path, source_label: str) -> pd.DataFrame:
71
+ df = pd.read_parquet(path)
72
+ # Keep only required columns in a fixed order if present
73
+ cols = [c for c in ["name", "link", "description"] if c in df.columns]
74
+ assert "link" in cols and "description" in cols, (
75
+ f"Expected columns 'link' and 'description' in {path}"
76
+ )
77
+ df = df[cols].copy()
78
+ df["source"] = source_label
79
+ return df
80
+
81
+
82
+ def load_inputs_from_sources(sources: List[Any], cfg_dir: Path) -> pd.DataFrame:
83
+ parts: List[pd.DataFrame] = []
84
+ for entry in sources:
85
+ if isinstance(entry, dict):
86
+ assert "path" in entry, "Each data_sources item must have a 'path'"
87
+ p = Path(entry["path"])
88
+ if not p.is_absolute():
89
+ p = (cfg_dir / p).resolve()
90
+ label = entry.get("source") or p.stem
91
+ else:
92
+ # allow shorthand string path
93
+ p = Path(entry)
94
+ if not p.is_absolute():
95
+ p = (cfg_dir / p).resolve()
96
+ label = p.stem
97
+ assert p.exists(), f"Input parquet not found: {p}"
98
+ parts.append(_read_parquet_safe(p, label))
99
+ assert parts, "No input parquet files found via data_sources."
100
+ df = pd.concat(parts, ignore_index=True)
101
+ # Filter to non-empty descriptions and deduplicate by link
102
+ df = df[df["description"].notna()]
103
+ df = df[df["description"].astype(str).str.strip() != ""]
104
+ df = df.drop_duplicates(subset=["link"]) # keep first occurrence
105
+ # Optional: normalize types for consistency
106
+ if "name" not in df.columns:
107
+ df["name"] = None
108
+ df = df[["link", "name", "description", "source"]]
109
+ return df
110
+
111
+
112
+ def embed_batch_tei(
113
+ client: InferenceClient,
114
+ model_url: str,
115
+ texts: List[str],
116
+ ) -> Any:
117
+ # InferenceClient.feature_extraction supports list[str] inputs and returns list[list[float]]
118
+ embs = client.feature_extraction(texts, model=model_url)
119
+ # Normalize to List[List[float]]
120
+ if isinstance(embs, list):
121
+ assert len(embs) == len(texts) and isinstance(embs[0], (list, tuple)), "Unexpected feature_extraction output"
122
+ return embs
123
+ # Otherwise accept numpy arrays (preferred for performance)
124
+ assert hasattr(embs, "shape"), f"Unexpected feature_extraction type: {type(embs)}"
125
+ assert embs.shape[0] == len(texts), "Embedding batch size mismatch"
126
+ return embs
127
+
128
+
129
+ def main() -> None:
130
+ ap = argparse.ArgumentParser(
131
+ description="Batch-embed repo descriptions using TEI and persist to Parquet (YAML-configured)"
132
+ )
133
+ ap.add_argument(
134
+ "--config",
135
+ default=str(Path(__file__).with_name("embed_repo_descriptions_config.yaml")),
136
+ help="Path to YAML config (default: next to script)",
137
+ )
138
+ ap.add_argument(
139
+ "--limit",
140
+ type=int,
141
+ default=None,
142
+ help="Optional limit on number of rows for a dry run",
143
+ )
144
+ args = ap.parse_args()
145
+
146
+ # Load secrets from .env
147
+ load_dotenv()
148
+
149
+ cfg_path = Path(args.config)
150
+ assert cfg_path.exists(), f"Config not found: {cfg_path}"
151
+ cfg_dir = cfg_path.parent
152
+ cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}
153
+
154
+ # Required config
155
+ tei_url = cfg.get("tei_url")
156
+ assert tei_url, "Missing 'tei_url' in config"
157
+ data_sources = cfg.get("data_sources")
158
+ assert isinstance(data_sources, list) and data_sources, (
159
+ "Config must provide non-empty 'data_sources' list"
160
+ )
161
+
162
+ # Optional config with defaults
163
+ batch_size = int(cfg.get("batch_size", 256))
164
+ timeout = int(cfg.get("timeout", 120))
165
+ concurrency = int(cfg.get("concurrency", 1))
166
+ assert concurrency >= 1, "concurrency must be >= 1"
167
+ out_cfg = cfg.get(
168
+ "output",
169
+ str(
170
+ Path(__file__).resolve().parents[1] / "repo_description_embeddings.parquet"
171
+ ),
172
+ )
173
+ out_path = Path(out_cfg)
174
+ if not out_path.is_absolute():
175
+ out_path = (cfg_dir / out_path).resolve()
176
+ # Token is sourced from env (e.g., set via .env)
177
+ token_env = cfg.get("hf_token_env", "HF_API_TOKEN")
178
+ hf_token = os.getenv(token_env)
179
+ # Optional single custom instruction (applied only to embedding input)
180
+ custom_instruction = cfg.get("custom_instruction")
181
+ if custom_instruction is not None:
182
+ assert isinstance(custom_instruction, str) and custom_instruction.strip() != "", (
183
+ "custom_instruction must be a non-empty string"
184
+ )
185
+ # Optional custom header (e.g., Salad-Api-Key)
186
+ custom_header_name = cfg.get("custom_header")
187
+ custom_header_env = cfg.get("custom_header_env", "API_KEY")
188
+ custom_header_value = os.getenv(custom_header_env) if custom_header_name else None
189
+ if custom_header_name:
190
+ assert (
191
+ custom_header_value is not None and custom_header_value != ""
192
+ ), f"custom_header is set to '{custom_header_name}' but env var '{custom_header_env}' is not set or empty"
193
+
194
+ # Build headers for client
195
+ client_headers = {}
196
+ if hf_token:
197
+ client_headers["Authorization"] = f"Bearer {hf_token}"
198
+ if custom_header_name and custom_header_value:
199
+ client_headers[custom_header_name] = custom_header_value
200
+
201
+ # Health check: poll TEI until it responds OK (up to 3 attempts) before proceeding
202
+ health_url = tei_url.rstrip("/") + "/health"
203
+ for _ in range(3):
204
+ resp = requests.get(health_url, headers=client_headers, timeout=timeout)
205
+ if resp.status_code == 200:
206
+ break
207
+ assert resp.status_code == 200, f"Health check failed after 3 attempts: {resp.status_code} {resp.text[:200]}"
208
+
209
+ # Compute model URL for embeddings endpoint
210
+ model_url = tei_url if tei_url.rstrip("/").endswith("/embed") else tei_url.rstrip("/") + "/embed"
211
+
212
+ df = load_inputs_from_sources(data_sources, cfg_dir)
213
+ if args.limit is not None and args.limit > 0:
214
+ df = df.head(args.limit)
215
+ # Ensure repo names are present if a custom instruction will join name + description
216
+ if custom_instruction is not None:
217
+ assert "name" in df.columns and df["name"].notna().all(), (
218
+ "When using custom_instruction, 'name' must be present and non-null for all rows"
219
+ )
220
+
221
+ # Prepare Parquet writer with a fixed schema
222
+ list_float32 = pa.list_(pa.float32())
223
+ schema = pa.schema(
224
+ [
225
+ ("link", pa.string()),
226
+ ("name", pa.string()),
227
+ ("description", pa.string()),
228
+ ("source", pa.string()),
229
+ ("dim", pa.int32()),
230
+ ("embedding", list_float32),
231
+ ]
232
+ )
233
+
234
+ out_path.parent.mkdir(parents=True, exist_ok=True)
235
+ writer = None
236
+ try:
237
+ writer = pq.ParquetWriter(out_path, schema)
238
+ texts: List[str] = df["description"].astype(str).tolist()
239
+ links: List[str] = df["link"].astype(str).tolist()
240
+ # Preserve nulls for name rather than coercing to "None"
241
+ names: List[Any] = (
242
+ df["name"].tolist() if "name" in df.columns else [None] * len(df)
243
+ )
244
+ sources: List[str] = df["source"].astype(str).tolist()
245
+
246
+ dim: int | None = None
247
+ total_batches = (len(texts) + batch_size - 1) // batch_size
248
+
249
+ def _submit_batch(b: int):
250
+ start = b * batch_size
251
+ end = min(start + batch_size, len(texts))
252
+ batch_texts = texts[start:end]
253
+ batch_names = names[start:end]
254
+ # Apply instruction formatting for embedding input only
255
+ embed_batch_texts = (
256
+ batch_texts
257
+ if custom_instruction is None
258
+ else [
259
+ f"{custom_instruction}\nQuery: repo name: {n}\ndescription: {t}"
260
+ for n, t in zip(batch_names, batch_texts)
261
+ ]
262
+ )
263
+ # Create a per-task client to avoid potential thread-safety issues in shared sessions
264
+ local_client = InferenceClient(headers=client_headers, timeout=timeout)
265
+ embs = embed_batch_tei(
266
+ local_client,
267
+ model_url,
268
+ embed_batch_texts,
269
+ )
270
+ return b, embs, batch_texts, batch_names, links[start:end], sources[start:end]
271
+
272
+ if concurrency == 1:
273
+ for b in tqdm(range(total_batches), total=total_batches, desc="Embedding", dynamic_ncols=True):
274
+ b_idx, embs, batch_texts, batch_names, batch_links, batch_sources = _submit_batch(b)
275
+ if dim is None:
276
+ dim = (embs.shape[1] if hasattr(embs, "shape") else len(embs[0]))
277
+ batch_dims = [dim] * (embs.shape[0] if hasattr(embs, "shape") else len(embs))
278
+ # Build Arrow arrays
279
+ arr_link = pa.array(batch_links, type=pa.string())
280
+ arr_name = pa.array(batch_names, type=pa.string())
281
+ arr_desc = pa.array(batch_texts, type=pa.string())
282
+ arr_source = pa.array(batch_sources, type=pa.string())
283
+ arr_dim = pa.array(batch_dims, type=pa.int32())
284
+ # Build embeddings Arrow ListArray directly from NumPy for performance
285
+ embs_np = (
286
+ embs.astype(np.float32, copy=False)
287
+ if hasattr(embs, "astype")
288
+ else np.asarray(embs, dtype=np.float32)
289
+ )
290
+ n_rows = embs_np.shape[0]
291
+ offsets = pa.array(np.arange(0, (n_rows + 1) * dim, dim, dtype=np.int32))
292
+ values = pa.array(embs_np.reshape(-1), type=pa.float32())
293
+ arr_emb = pa.ListArray.from_arrays(offsets, values, type=list_float32)
294
+ table = pa.Table.from_arrays(
295
+ [arr_link, arr_name, arr_desc, arr_source, arr_dim, arr_emb],
296
+ schema=schema,
297
+ )
298
+ writer.write_table(table)
299
+ else:
300
+ with ThreadPoolExecutor(max_workers=concurrency) as ex:
301
+ with tqdm(total=total_batches, desc="Embedding", dynamic_ncols=True) as pbar:
302
+ next_to_submit = 0
303
+ futures = []
304
+ # Prime the executor with up to `concurrency` tasks
305
+ while next_to_submit < min(concurrency, total_batches):
306
+ futures.append(ex.submit(_submit_batch, next_to_submit))
307
+ next_to_submit += 1
308
+ pbar.set_postfix(inflight=len(futures), submitted=next_to_submit, refresh=True)
309
+ # As each future completes, write results and submit the next batch
310
+ while futures:
311
+ for fut in as_completed(futures, timeout=None):
312
+ futures.remove(fut)
313
+ b_idx, embs, batch_texts, batch_names, batch_links, batch_sources = fut.result()
314
+ if dim is None:
315
+ dim = (embs.shape[1] if hasattr(embs, "shape") else len(embs[0]))
316
+ batch_dims = [dim] * (embs.shape[0] if hasattr(embs, "shape") else len(embs))
317
+ arr_link = pa.array(batch_links, type=pa.string())
318
+ arr_name = pa.array(batch_names, type=pa.string())
319
+ arr_desc = pa.array(batch_texts, type=pa.string())
320
+ arr_source = pa.array(batch_sources, type=pa.string())
321
+ arr_dim = pa.array(batch_dims, type=pa.int32())
322
+ embs_np = (
323
+ embs.astype(np.float32, copy=False)
324
+ if hasattr(embs, "astype")
325
+ else np.asarray(embs, dtype=np.float32)
326
+ )
327
+ n_rows = embs_np.shape[0]
328
+ offsets = pa.array(np.arange(0, (n_rows + 1) * dim, dim, dtype=np.int32))
329
+ values = pa.array(embs_np.reshape(-1), type=pa.float32())
330
+ arr_emb = pa.ListArray.from_arrays(offsets, values, type=list_float32)
331
+ table = pa.Table.from_arrays(
332
+ [arr_link, arr_name, arr_desc, arr_source, arr_dim, arr_emb],
333
+ schema=schema,
334
+ )
335
+ writer.write_table(table)
336
+ pbar.update(1)
337
+ # Submit next batch if any remain
338
+ if next_to_submit < total_batches:
339
+ futures.append(ex.submit(_submit_batch, next_to_submit))
340
+ next_to_submit += 1
341
+ pbar.set_postfix(inflight=len(futures), submitted=next_to_submit, refresh=True)
342
+ # Break to re-enter as_completed with the updated futures list
343
+ break
344
+ finally:
345
+ if writer is not None:
346
+ writer.close()
347
+
348
+ print(f"Wrote embeddings for {len(df)} repos to {out_path}")
349
+
350
+
351
+ if __name__ == "__main__":
352
+ main()
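
Note: the script above only generates and persists the embeddings; the filtering referenced in the commit message happens downstream. A minimal sketch of such a step, assuming the Parquet schema documented in the docstring and a TEI endpoint at a placeholder URL (query text and similarity threshold are illustrative only, not part of this commit):

import numpy as np
import pandas as pd
from huggingface_hub import InferenceClient

# Load the Parquet written by embed_repo_descriptions.py
df = pd.read_parquet("repo_description_embeddings.parquet")
emb = np.vstack(df["embedding"].to_numpy()).astype(np.float32)

# Embed a reference query through the same TEI /embed route the script targets
client = InferenceClient(timeout=120)
query = "developer tooling, libraries, frameworks, developer-focused documentation"
q = np.asarray(
    client.feature_extraction(query, model="http://127.0.0.1:8080/embed"),
    dtype=np.float32,
).reshape(-1)

# Cosine similarity of every repo embedding against the query embedding
sims = (emb @ q) / (np.linalg.norm(emb, axis=1) * np.linalg.norm(q) + 1e-12)
kept = df[sims >= 0.35]  # threshold is a placeholder; tune on a labeled sample
print(kept[["name", "link"]].head())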
data_collection_utils/embed_repo_descriptions_config.yaml ADDED
@@ -0,0 +1,47 @@
1
+ # Configuration for embed_repo_descriptions.py
2
+ # Paths are resolved relative to this file's directory
3
+
4
+ tei_url: https://mayonaise-mushroom-d1nbuod55i4as9x1.salad.cloud
5
+ model: tei
6
+ batch_size: 128
7
+ timeout: 180
8
+ ## Number of parallel requests to the TEI endpoint. Increase if your endpoint supports concurrency.
9
+ ## Set to 1 to run sequentially.
10
+ concurrency: 4
11
+ output: ../repo_description_embeddings.parquet
12
+
13
+ # Optional: prepend a single custom instruction to each text for models like Qwen3-Embedding.
14
+ # Applied only to embedding inputs:
15
+ # "{instruction}\nQuery: repo name: {name}\ndescription: {text}"
16
+ custom_instruction: |
17
+ Instruction: Produce an embedding vector that captures how much a GitHub repository is relevant to developer tooling, libraries, frameworks, or developer-focused documentation. Repositories unrelated to software development (like general ebooks, personal projects, or end-user applications) should have embeddings that are distant in vector space.
18
+
19
+ Context: Use the repository name and description to assess relevance.
20
+
21
+ Guidelines:
22
+ - Positive examples (should be considered developer tooling):
23
+ 1. "The library for web and native user interfaces." (facebook/react)
24
+ 2. "A modern build tool to bundle JavaScript applications." (vitejs/vite)
25
+ 3. "A Python framework for building web APIs quickly." (tiangolo/fastapi)
26
+
27
+ - Negative examples (should be considered irrelevant):
28
+ 1. "📚 Freely available programming books." (EbookFoundation/free-programming-books)
29
+ 2. "Collection of funny memes." (someuser/memes)
30
+ 3. "Personal finance tracker app." (someuser/fintrack)
31
+
32
+ Task: Return an embedding vector that represents the repository’s relevance to developer tools and developer-focused documentation.
33
+
34
+
35
+ # Environment variable name to read the Hugging Face/TEI token from
36
+ hf_token_env: HF_API_TOKEN
37
+
38
+ # Optional custom provider header (e.g., SaladCloud)
39
+ # The header value will be read from the environment variable named in custom_header_env
40
+ custom_header: Salad-Api-Key
41
+ custom_header_env: API_KEY
42
+
43
+ data_sources:
44
+ - path: ./awesome-repos.parquet
45
+ source: awesome
46
+ # - path: ./top-1000-repos.parquet
47
+ # source: top1000
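
For illustration, with this config the script assembles the TEI input and request headers roughly as follows (repo name and description are sample values; only the embedding input is affected, the Parquet keeps the original description):

import os

# The custom_instruction block above, truncated here for brevity
instruction = "Instruction: Produce an embedding vector that captures how much a GitHub repository is relevant to developer tooling, ..."
name, description = "vite", "A modern build tool to bundle JavaScript applications."  # sample values
embed_input = f"{instruction}\nQuery: repo name: {name}\ndescription: {description}"

# Headers: Salad-Api-Key comes from the env var named in custom_header_env,
# and the HF token (if set) is sent as a Bearer token.
headers = {"Salad-Api-Key": os.environ["API_KEY"]}
if os.getenv("HF_API_TOKEN"):
    headers["Authorization"] = f"Bearer {os.environ['HF_API_TOKEN']}"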
data_collection_utils/top_1000_repos.py CHANGED
@@ -1,7 +1,9 @@
1
  from pathlib import Path
2
  from playwright.sync_api import sync_playwright
3
  from urllib.parse import urlparse
4
- from typing import List, Optional
5
  import pandas as pd
6
  from github_api_utils import get_repo_info
7
 
@@ -81,10 +83,18 @@ def map_to_original_repos(urls: List[str]) -> List[str]:
81
  o = resolve_to_original_repo(u)
82
  if o is not None:
83
  originals.add(o)
84
  return sorted(originals)
85
 
86
 
87
  def main() -> None:
88
  project_root = Path(__file__).resolve().parents[1]
89
  out_html = Path(__file__).with_name(
90
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
@@ -100,87 +110,76 @@ def main() -> None:
100
  # Wait until at least one GitHub link is present in the DOM
101
  page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
102
 
  # Save rendered HTML
104
  html = page.content()
105
  out_html.write_text(html, encoding="utf-8")
106
 
107
- # Extract structured repo data from the rendered DOM
108
- items = page.eval_on_selector_all(
109
- 'a[href^="https://github.com/"]',
110
- r'''
111
- els => {
112
- const seen = new Map();
113
- function canonical(u) {
114
- try {
115
- const url = new URL(u);
116
- const parts = url.pathname.split('/').filter(Boolean);
117
- if (url.hostname !== 'github.com' || parts.length < 2) return null;
118
- return `https://github.com/${parts[0]}/${parts[1]}`;
119
- } catch { return null; }
120
- }
121
- function extractCardInfo(a) {
122
- const cu = canonical(a.href);
123
- if (!cu) return null;
124
- let container = a.closest('li, article, .repo, .card, .row, .item, .entry, .col') || a.parentElement;
125
- if (!container) container = a;
126
- const name = (a.textContent || '').trim();
127
- // description: first <p> with reasonable length
128
- let desc = '';
129
- const ps = container.querySelectorAll('p');
130
- for (const p of ps) {
131
- const t = (p.textContent || '').trim();
132
- if (t.length >= 10 && t.length <= 300) { desc = t; break; }
133
- }
134
- // stars: prefer contexts with star symbol or 'stars'
135
- let stars = null;
136
- const text = container.textContent || '';
137
- const m1 = text.match(/([0-9][0-9,._]*)\s*(★|⭐|stars?)/i);
138
- if (m1) {
139
- stars = parseInt(m1[1].replace(/[,_\.]/g, ''), 10);
140
- } else {
141
- const matches = Array.from(text.matchAll(/\b\d{1,3}(?:,\d{3})+|\b\d{2,6}\b/g));
142
- if (matches.length) {
143
- const nums = matches.map(m => parseInt(m[0].replace(/,/g, ''), 10)).filter(n => !Number.isNaN(n));
144
- if (nums.length) stars = Math.max(...nums);
145
- }
146
- }
147
- return { name, link: cu, description: desc, stars };
148
- }
149
- for (const a of els) {
150
- const info = extractCardInfo(a);
151
- if (!info) continue;
152
- if (seen.has(info.link)) continue;
153
- seen.set(info.link, info);
154
- }
155
- return Array.from(seen.values());
156
- }
157
- ''',
158
  )
159
-
160
- # Persist Parquet with schema: name, link, description, stars
161
- df = pd.DataFrame(items)
162
- # Fallback: if items list is empty, keep legacy behavior to at least emit links
163
- if df.empty:
164
- links = page.eval_on_selector_all(
165
- 'a[href*="https://github.com/"]',
166
- "els => Array.from(new Set(els.map(e => e.href))).sort()",
167
- )
168
- repo_links = normalize_github_repo_links(links)
169
- repo_links = map_to_original_repos(repo_links)
170
- with out_links.open("w", encoding="utf-8") as f:
171
- f.write("\n".join(repo_links) + "\n")
172
- print(f"Wrote HTML to {out_html}")
173
- print(
174
- f"Found {len(repo_links)} original GitHub repositories and saved to {out_links}"
175
- )
176
- else:
177
- # Also emit github_links.txt for compatibility
178
- repo_links = sorted({u for u in df["link"].tolist()})
179
- with out_links.open("w", encoding="utf-8") as f:
180
- f.write("\n".join(repo_links) + "\n")
181
- df.to_parquet(out_parquet, index=False)
182
- print(f"Wrote HTML to {out_html}")
183
- print(f"Saved {len(df)} repos to {out_parquet} and links to {out_links}")
184
 
185
  context.close()
186
  browser.close()
 
1
  from pathlib import Path
2
  from playwright.sync_api import sync_playwright
3
  from urllib.parse import urlparse
4
+ from typing import List, Optional, Dict, Any
5
+ from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ import argparse
7
  import pandas as pd
8
  from github_api_utils import get_repo_info
9
 
 
83
  o = resolve_to_original_repo(u)
84
  if o is not None:
85
  originals.add(o)
86
+ else:
87
+ # keep the canonical URL if we couldn't resolve
88
+ cu = canonical_repo_url(u) or u
89
+ originals.add(cu)
90
  return sorted(originals)
91
 
92
 
93
  def main() -> None:
94
+ ap = argparse.ArgumentParser(description="Fetch Top 1000 repos and enrich via GitHub API")
95
+ ap.add_argument("--workers", type=int, default=16, help="Concurrency for GitHub API requests")
96
+ args = ap.parse_args()
97
+
98
  project_root = Path(__file__).resolve().parents[1]
99
  out_html = Path(__file__).with_name(
100
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
 
110
  # Wait until at least one GitHub link is present in the DOM
111
  page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
112
 
113
+ # Auto-scroll to force lazy loading/virtualized list to render all items
114
+ def _scroll_all(max_iters: int = 200, pause_ms: int = 300) -> None:
115
+ prev_count = 0
116
+ stable = 0
117
+ for _ in range(max_iters):
118
+ page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
119
+ page.wait_for_timeout(pause_ms)
120
+ count = page.eval_on_selector_all(
121
+ 'a[href^="https://github.com/"]', 'els => els.length'
122
+ )
123
+ if count <= prev_count:
124
+ stable += 1
125
+ else:
126
+ stable = 0
127
+ prev_count = count
128
+ # Stop after several iterations without growth or when clearly above 1000 anchors
129
+ if stable >= 10 or prev_count >= 1500:
130
+ break
131
+
132
+ _scroll_all()
133
+
134
  # Save rendered HTML
135
  html = page.content()
136
  out_html.write_text(html, encoding="utf-8")
137
 
138
+ # Extract canonical GitHub repo URLs from the DOM after full scroll
139
+ links = page.eval_on_selector_all(
140
+ 'a[href*="https://github.com/"]',
141
+ "els => Array.from(new Set(els.map(e => e.href))).sort()",
142
+ )
143
+ repo_links = normalize_github_repo_links(links)
144
+
145
+ # Optionally map any fork links to their original repositories and deduplicate
146
+ repo_links = map_to_original_repos(repo_links)
147
+
148
+ # Persist github_links.txt for visibility/debug (even if not used downstream)
149
+ with out_links.open("w", encoding="utf-8") as f:
150
+ f.write("\n".join(repo_links) + "\n")
151
+
152
+ # Enrich via GitHub API concurrently
153
+ def _one(url: str) -> Dict[str, Any]:
154
+ owner_repo = urlparse(url).path.strip("/").split("/")[:2]
155
+ owner, repo = owner_repo[0], owner_repo[1]
156
+ info = get_repo_info(owner, repo) or {}
157
+ name = info.get("name") or repo
158
+ desc = info.get("description") or None
159
+ stars = info.get("stargazers_count")
160
+ return {
161
+ "name": name,
162
+ "link": f"https://github.com/{owner}/{repo}",
163
+ "description": desc,
164
+ "stars": int(stars) if isinstance(stars, int) else None,
165
+ }
166
+
167
+ rows: List[Dict[str, Any]] = []
168
+ with ThreadPoolExecutor(max_workers=args.workers) as ex:
169
+ futs = [ex.submit(_one, u) for u in repo_links]
170
+ for fut in as_completed(futs):
171
+ try:
172
+ rows.append(fut.result())
173
+ except Exception:
174
+ # Skip on error; we aim for stability over strict completeness
175
+ continue
176
+
177
+ df = pd.DataFrame(rows)
178
+ df.to_parquet(out_parquet, index=False)
179
+ print(f"Wrote HTML to {out_html}")
180
+ print(
181
+ f"Saved {len(df)} repos to {out_parquet} and links ({len(repo_links)}) to {out_links}"
182
  )
183
 
184
  context.close()
185
  browser.close()
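
The Parquet written here (name, link, description, stars) is the shape embed_repo_descriptions.py expects from its data_sources entries; a quick illustrative check, assuming the default output path next to the script:

import pandas as pd

df = pd.read_parquet("data_collection_utils/top-1000-repos.parquet")
# embed_repo_descriptions.py requires at least 'link' and 'description' columns
assert {"link", "description"}.issubset(df.columns)
print(df[["name", "link", "description", "stars"]].head())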
requirements.txt CHANGED
@@ -7,4 +7,6 @@ langid
7
  playwright
8
  duckdb
9
  aiohttp
10
- python-dotenv
10
+ python-dotenv
11
+ huggingface_hub
12
+ numpy