MRiabov committed on
Commit c554ef1 · 1 Parent(s): 5e6838c

Remove the .txt for storing data

data_collection_utils/parse_gh_docs_config.yaml CHANGED
@@ -1,8 +1,14 @@
 # Global configuration for scrape_gh_docs.py
 # All configuration is driven by this file. The CLI only supports --no-fetch for convenience.
 
-# Input: file with one repo per line; either owner/repo or a full GitHub URL.
-input: ./github_links.txt
+# Inputs
+# Preferred: one or more Parquet files produced by our collectors, each with a 'link' column
+# Examples:
+# - data_collection_utils/awesome_final_repos.py -> awesome-repos.parquet
+# - data_collection_utils/top_1000_repos.py -> top-1000-repos.parquet
+input_parquet:
+  - ./awesome-repos.parquet
+  - ./top-1000-repos.parquet
 
 # Output directories/files
 outdir: ../output
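For context, each `input_parquet` entry is expected to point at a Parquet file carrying a 'link' column. A minimal sketch of producing and checking such a file (the file name and links are examples only; assumes pandas with a Parquet engine such as pyarrow installed):

# make_links_parquet.py -- illustrative only, not part of this commit
import pandas as pd

# Any collector can emit a file like this; scrape_gh_docs.py only requires a 'link' column.
links = [
    "https://github.com/pandas-dev/pandas",
    "https://github.com/numpy/numpy",
]
pd.DataFrame({"link": links}).to_parquet("example-repos.parquet", index=False)

# Quick check that the file is readable and carries the expected column
check = pd.read_parquet("example-repos.parquet")
assert "link" in check.columns
print(check["link"].tolist())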
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -869,7 +869,6 @@ def main():
             p = (cfg_path.parent / p).resolve()
         return str(p)
 
-    input_value = _resolve_cfg_path(cfg.get("input"))
     outdir_value = _resolve_cfg_path(cfg.get("outdir", "output"))
     md_failed_value = _resolve_cfg_path(cfg.get("md_failed", "md-failed.txt"))
     dry_run_value = bool(cfg.get("dry_run", False))
@@ -886,6 +885,15 @@
     lang_filter_value = cfg.get("lang_filter", "en")
     min_text_chars_value = int(cfg.get("min_text_chars", 200))
 
+    def _resolve_cfg_paths(val):
+        if val is None:
+            return []
+        if isinstance(val, (list, tuple)):
+            return [_resolve_cfg_path(v) for v in val if v is not None]
+        return [_resolve_cfg_path(val)]
+
+    input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
+
     # Configure logging (use tqdm-friendly handler)
     setup_logging(quiet=quiet_value)
 
@@ -903,15 +911,25 @@
     if no_fetch_value:
         lines: List[str] = []
     else:
-        in_path = Path(input_value)
-        if not in_path.exists():
-            logger.error(f"Input file {in_path} does not exist.")
+        lines: List[str] = []
+        if not input_parquet_values:
+            logger.error(
+                "'input_parquet' is required. Configure one or more Parquet files with a 'link' column in parse_gh_docs_config.yaml."
+            )
             sys.exit(2)
-        lines = [
-            line_str.strip()
-            for line_str in in_path.read_text(encoding="utf-8").splitlines()
-            if line_str.strip()
-        ]
+        # Read repositories from one or more Parquet files; use 'link' column
+        seen = set()
+        for pth in input_parquet_values:
+            df = pd.read_parquet(pth)
+            assert "link" in df.columns, f"Parquet {pth} must contain 'link' column"
+            for u in df["link"].tolist():
+                s = str(u).strip()
+                if not s:
+                    continue
+                # We accept full GitHub URLs; process_repo_entry will handle them
+                if s not in seen:
+                    seen.add(s)
+                    lines.append(s)
 
     # Initialize locks for thread-safe writes and results accumulation
     md_failed_lock = threading.Lock()
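Read separately from the diff, the new input path amounts to: read each configured Parquet, require a 'link' column, and collect unique non-empty links in first-seen order. A standalone sketch of that logic (the function name and example paths are illustrative, not part of the commit; assumes pandas with a Parquet engine):

from typing import List
import pandas as pd

def read_repo_links(parquet_paths: List[str]) -> List[str]:
    """Collect unique, non-empty 'link' values across Parquet files, preserving first-seen order."""
    seen = set()
    links: List[str] = []
    for path in parquet_paths:
        df = pd.read_parquet(path)
        if "link" not in df.columns:
            raise ValueError(f"Parquet {path} must contain a 'link' column")
        for value in df["link"].tolist():
            s = str(value).strip()
            if s and s not in seen:
                seen.add(s)
                links.append(s)
    return links

# Example usage (paths are hypothetical):
# read_repo_links(["./awesome-repos.parquet", "./top-1000-repos.parquet"])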
data_collection_utils/top_1000_repos.py CHANGED
@@ -2,6 +2,7 @@ from pathlib import Path
 from playwright.sync_api import sync_playwright
 from urllib.parse import urlparse
 from typing import List, Optional
+import pandas as pd
 from github_api_utils import get_repo_info
 
 URL = "https://top1000repos.com/"
@@ -89,6 +90,7 @@ def main() -> None:
         "Top 1000 GitHub repositories, updated daily, all on one page..html"
     )
     out_links = project_root / "github_links.txt"
+    out_parquet = Path(__file__).with_name("top-1000-repos.parquet")
 
     with sync_playwright() as p:
         browser = p.chromium.launch(headless=True)
@@ -102,22 +104,83 @@
         html = page.content()
         out_html.write_text(html, encoding="utf-8")
 
-        # Extract GitHub links directly from the DOM
-        links = page.eval_on_selector_all(
-            'a[href*="https://github.com/"]',
-            "els => Array.from(new Set(els.map(e => e.href))).sort()",
+        # Extract structured repo data from the rendered DOM
+        items = page.eval_on_selector_all(
+            'a[href^="https://github.com/"]',
+            r'''
+            els => {
+              const seen = new Map();
+              function canonical(u) {
+                try {
+                  const url = new URL(u);
+                  const parts = url.pathname.split('/').filter(Boolean);
+                  if (url.hostname !== 'github.com' || parts.length < 2) return null;
+                  return `https://github.com/${parts[0]}/${parts[1]}`;
+                } catch { return null; }
+              }
+              function extractCardInfo(a) {
+                const cu = canonical(a.href);
+                if (!cu) return null;
+                let container = a.closest('li, article, .repo, .card, .row, .item, .entry, .col') || a.parentElement;
+                if (!container) container = a;
+                const name = (a.textContent || '').trim();
+                // description: first <p> with reasonable length
+                let desc = '';
+                const ps = container.querySelectorAll('p');
+                for (const p of ps) {
+                  const t = (p.textContent || '').trim();
+                  if (t.length >= 10 && t.length <= 300) { desc = t; break; }
+                }
+                // stars: prefer contexts with star symbol or 'stars'
+                let stars = null;
+                const text = container.textContent || '';
+                const m1 = text.match(/([0-9][0-9,._]*)\s*(★|⭐|stars?)/i);
+                if (m1) {
+                  stars = parseInt(m1[1].replace(/[,_\.]/g, ''), 10);
+                } else {
+                  const matches = Array.from(text.matchAll(/\b\d{1,3}(?:,\d{3})+|\b\d{2,6}\b/g));
+                  if (matches.length) {
+                    const nums = matches.map(m => parseInt(m[0].replace(/,/g, ''), 10)).filter(n => !Number.isNaN(n));
+                    if (nums.length) stars = Math.max(...nums);
+                  }
+                }
+                return { name, link: cu, description: desc, stars };
+              }
+              for (const a of els) {
+                const info = extractCardInfo(a);
+                if (!info) continue;
+                if (seen.has(info.link)) continue;
+                seen.set(info.link, info);
+              }
+              return Array.from(seen.values());
+            }
+            ''',
         )
 
-        repo_links = normalize_github_repo_links(links)
-        # Map any fork links to their original repositories and deduplicate
-        repo_links = map_to_original_repos(repo_links)
-        with out_links.open("w", encoding="utf-8") as f:
-            f.write("\n".join(repo_links) + "\n")
-
-        print(f"Wrote HTML to {out_html}")
-        print(
-            f"Found {len(repo_links)} original GitHub repositories and saved to {out_links}"
-        )
+        # Persist Parquet with schema: name, link, description, stars
+        df = pd.DataFrame(items)
+        # Fallback: if items list is empty, keep legacy behavior to at least emit links
+        if df.empty:
+            links = page.eval_on_selector_all(
+                'a[href*="https://github.com/"]',
+                "els => Array.from(new Set(els.map(e => e.href))).sort()",
+            )
+            repo_links = normalize_github_repo_links(links)
+            repo_links = map_to_original_repos(repo_links)
+            with out_links.open("w", encoding="utf-8") as f:
+                f.write("\n".join(repo_links) + "\n")
+            print(f"Wrote HTML to {out_html}")
+            print(
+                f"Found {len(repo_links)} original GitHub repositories and saved to {out_links}"
+            )
+        else:
+            # Also emit github_links.txt for compatibility
+            repo_links = sorted({u for u in df["link"].tolist()})
+            with out_links.open("w", encoding="utf-8") as f:
+                f.write("\n".join(repo_links) + "\n")
+            df.to_parquet(out_parquet, index=False)
+            print(f"Wrote HTML to {out_html}")
+            print(f"Saved {len(df)} repos to {out_parquet} and links to {out_links}")
 
         context.close()
         browser.close()
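After a run, the new Parquet output could be sanity-checked with something like the sketch below (the path and the column checks follow the 'name, link, description, stars' schema comment in the diff and are otherwise assumptions; requires pandas with a Parquet engine):

# verify_top_1000_parquet.py -- illustrative check, not part of this commit
import pandas as pd

df = pd.read_parquet("data_collection_utils/top-1000-repos.parquet")
print(df.columns.tolist())   # expected: ['name', 'link', 'description', 'stars']
print(df.head())

# Every link should be a canonical https://github.com/<owner>/<repo> URL
assert {"name", "link", "description", "stars"}.issubset(df.columns)
assert df["link"].str.startswith("https://github.com/").all()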