MRiabov committed
Commit 1068e07 · 1 Parent(s): c6bfa44

(optim) faster rebuild

clean/clean_docs_using_content.py CHANGED
@@ -26,7 +26,7 @@ if __name__ == "__main__":
     -- Step 1: Filter the base data (filename column is precomputed)
     WITH filtered AS (
         SELECT *
-        FROM 'texts.parquet'
+        FROM 'output/texts.parquet'
         WHERE {conditions_str}
     ),
     -- Step 2: Rank by mtime within each (owner, repo, filename) group to identify latest versions
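
The ranking step mentioned in the comment above lies outside this hunk. As a point of reference, a minimal DuckDB sketch of that kind of dedup (assuming the precomputed owner/repo/filename columns plus mtime from the scraper's schema; this is not the script's actual query):

import duckdb

# Hedged sketch: keep only the newest version of each (owner, repo, filename)
# by ranking on mtime, as the "Step 2" comment describes.
latest = duckdb.sql(
    """
    WITH ranked AS (
        SELECT *,
               ROW_NUMBER() OVER (
                   PARTITION BY owner, repo, filename
                   ORDER BY mtime DESC
               ) AS rn
        FROM 'output/texts.parquet'
    )
    SELECT * EXCLUDE (rn) FROM ranked WHERE rn = 1
    """
)
print(latest.df().head())
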
clean/data_stats.py CHANGED
@@ -2,7 +2,7 @@ import duckdb
 
 if __name__ == "__main__":
     # Set source file: 'texts.parquet' or 'gooddocs.parquet'
-    source_file = "texts.parquet"
+    source_file = "output/texts.parquet"
 
     # Query to filter repos with more than 100 docs and fetch their README.md
     query = f"""
data_collection_utils/fetch_gh_meta.py CHANGED
@@ -162,7 +162,7 @@ def main():
 
     # Fetch in batches via GraphQL
     records: List[Dict[str, Any]] = []
-    run_ts = datetime.now(datetime.timezone.utc).isoformat()
+    run_ts = datetime.utcnow().isoformat()
     for i in tqdm(range(0, len(pairs), batch_size), desc="GraphQL batches"):
         batch = pairs[i : i + batch_size]
         meta = fetch_repos_metadata_graphql(batch)
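
For context on this change: with `from datetime import datetime`, the old call `datetime.now(datetime.timezone.utc)` raises AttributeError because the datetime class has no `timezone` attribute. `datetime.utcnow()` avoids the crash but yields a naive timestamp (and is deprecated in newer Python releases in favour of `datetime.now(timezone.utc)`). A small sketch of both forms:

from datetime import datetime, timezone

run_ts = datetime.utcnow().isoformat()                  # naive UTC, as used in this commit
run_ts_aware = datetime.now(timezone.utc).isoformat()   # timezone-aware alternative (module-level timezone)
print(run_ts)        # e.g. 2025-01-01T12:00:00.000000
print(run_ts_aware)  # e.g. 2025-01-01T12:00:00.000000+00:00
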
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -159,23 +159,46 @@ def collect_md_rows_for_repo_dir(
     except ValueError:
         return []
     rows: List[Dict[str, Any]] = []
-    for md_file in d.rglob("*.md"):
-        if not md_file.is_file():
+    # Collect candidate files:
+    # - All Markdown files (*.md) under repo dir
+    # - Plus README files at repo root with any extension (README, README.md, README.rst, etc.)
+    candidates: List[Path] = []
+    candidates.extend(d.rglob("*.md"))
+    # Add root-level README variants (case-insensitive)
+    for p in d.iterdir():
+        if p.is_file() and p.name.lower().startswith("readme"):
+            candidates.append(p)
+    # Deduplicate while preserving order
+    seen_paths = set()
+    unique_candidates: List[Path] = []
+    for p in candidates:
+        rp = p.resolve()
+        if rp in seen_paths:
+            continue
+        seen_paths.add(rp)
+        unique_candidates.append(p)
+
+    for fpath in unique_candidates:
+        if not fpath.is_file():
             continue
         try:
-            rel_repo = md_file.relative_to(d)
+            rel_repo = fpath.relative_to(d)
         except Exception:
-            rel_repo = md_file.name
-        text = md_file.read_text(encoding="utf-8", errors="replace")
+            rel_repo = fpath.name
+        text = fpath.read_text(encoding="utf-8", errors="replace")
         lang_code = None
         include = True
-        if len(text) >= min_text_chars_value:
-            lid_code, _ = langid.classify(text)
-            lang_code = lid_code
-            if lang_filter_value is not None and str(lang_filter_value).strip() != "":
+        # Always include README files regardless of length/language
+        is_readme = fpath.parent == d and fpath.name.lower().startswith("readme")
+        should_filter_lang = (
+            lang_filter_value is not None and str(lang_filter_value).strip() != ""
+        )
+        if not is_readme and should_filter_lang:
+            if len(text) >= min_text_chars_value:
+                lid_code, _ = langid.classify(text)
+                lang_code = lid_code
                 include = lang_code == lang_filter_value
-        else:
-            if lang_filter_value is not None and str(lang_filter_value).strip() != "":
+            else:
                 include = False
         if include:
             row = {
@@ -183,9 +206,9 @@ def collect_md_rows_for_repo_dir(
                 "repo": repo,
                 "repo_dir": d.name,
                 "file_rel_repo": str(rel_repo),
-                "file_rel_outdir": str(md_file.relative_to(outdir)),
-                "size": md_file.stat().st_size,
-                "mtime": int(md_file.stat().st_mtime),
+                "file_rel_outdir": str(fpath.relative_to(outdir)),
+                "size": fpath.stat().st_size,
+                "mtime": int(fpath.stat().st_mtime),
                 "lang": lang_code,
                 "content": text,
                 "updated_at": updated_at,
@@ -885,6 +908,7 @@ def main():
     no_fetch_value = bool(args.no_fetch or cfg.get("no_fetch", False))
     lang_filter_value = cfg.get("lang_filter", "en")
     min_text_chars_value = int(cfg.get("min_text_chars", 200))
+    postprocess_executor_value = str(cfg.get("postprocess_executor", "thread")).lower()
 
     def _resolve_cfg_paths(val):
         if val is None:
@@ -911,6 +935,7 @@ def main():
     )
     con = duckdb.connect(str(duckdb_path))
     _init_duckdb(con)
+    logger.info(f"Opened DuckDB at '{duckdb_path}'")
 
     md_failed_path = Path(md_failed_value)
     # create/empty md_failed file
@@ -952,6 +977,7 @@ def main():
             seen_raw_invalid.add(s)
             lines.append(s)
 
+    logger.info(f"Will process {len(lines)} repo entries (fetch={'off' if no_fetch_value else 'on'})")
     # Initialize locks for thread-safe writes and results accumulation
     md_failed_lock = threading.Lock()
     results_lock = threading.Lock()
@@ -959,7 +985,7 @@ def main():
     duckdb_lock = threading.Lock()
 
     # Process repositories concurrently
-    run_ts = datetime.now(datetime.timezone.utc).isoformat()
+    run_ts = datetime.utcnow().isoformat()
     with tqdm(total=len(lines), desc="Repos") as pbar:
 
        def _run(lr: str):
@@ -1017,6 +1043,7 @@ def main():
                )
                con.unregister("df_txt_one")
                con.execute("COMMIT")
+                logger.info(f"[incremental] {owner}/{repo}: inserted {len(rows_one)} rows into DuckDB")
            except Exception as e:
                logger.error(f"Exception while processing {lr}: {e}")
                append_line_threadsafe(
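
The failure path above goes through `append_line_threadsafe`, whose body is not part of this diff. A sketch of the assumed shape (hypothetical implementation; only the name and the lock usage come from the diff):

import threading
from pathlib import Path

def append_line_threadsafe(path: Path, line: str, lock: threading.Lock) -> None:
    # Assumed shape: serialize appends so concurrent workers do not interleave lines.
    with lock:
        with path.open("a", encoding="utf-8") as f:
            f.write(line.rstrip("\n") + "\n")
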
@@ -1054,6 +1081,7 @@ def main():
         for d in outdir.iterdir()
         if d.is_dir() and "__" in d.name and not d.name.startswith("tmp_")
     ]
+    logger.info(f"Starting post-processing scan of {len(repo_dirs)} repos under '{outdir}' to rebuild per-file dataset")
     # Rebuild texts table from filesystem by streaming per-repo inserts into DuckDB
     cols = [
         "owner",
@@ -1068,12 +1096,19 @@ def main():
         "updated_at",
     ]
     total_inserted = 0
+    logger.info("Clearing DuckDB 'texts' table for full rebuild")
     with duckdb_lock:
         con.execute("DELETE FROM texts")
     with tqdm(total=len(repo_dirs), desc="Collecting per-file rows (repos)") as pbar:
-        with concurrent.futures.ThreadPoolExecutor(
-            max_workers=workers_value
-        ) as executor:
+        executor_cls = (
+            concurrent.futures.ProcessPoolExecutor
+            if postprocess_executor_value == "process"
+            else concurrent.futures.ThreadPoolExecutor
+        )
+        logger.info(
+            f"Post-processing executor: {executor_cls.__name__} with workers={workers_value}"
+        )
+        with executor_cls(max_workers=workers_value) as executor:
            futures = [
                executor.submit(
                    collect_md_rows_for_repo_dir,
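
The switch above lets a `postprocess_executor` config value pick threads or processes: a process pool sidesteps the GIL for CPU-heavy per-file work (such as language detection) at the cost of pickling overhead, and it requires the submitted callable to be a picklable, module-level function plus an `if __name__ == "__main__":` guard. A minimal standalone sketch of the same pattern (names here are illustrative, not the script's):

import concurrent.futures

def work(n: int) -> int:      # must be module-level to be picklable for a process pool
    return n * n

def run(kind: str = "thread", workers: int = 4) -> list[int]:
    executor_cls = (
        concurrent.futures.ProcessPoolExecutor
        if kind == "process"
        else concurrent.futures.ThreadPoolExecutor
    )
    with executor_cls(max_workers=workers) as executor:
        return list(executor.map(work, range(8)))

if __name__ == "__main__":    # guard is required when worker processes are spawned
    print(run("process"))
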
@@ -1098,6 +1133,7 @@ def main():
                con.execute("INSERT INTO texts SELECT * FROM df_txt_chunk")
                con.unregister("df_txt_chunk")
                total_inserted += len(rows)
+                pbar.set_postfix_str(f"rows={total_inserted}")
                pbar.update(1)
 
    # Export texts from DuckDB to Parquet
@@ -1106,9 +1142,11 @@ def main():
    )
    try:
        texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
+        logger.info(f"Exporting DuckDB 'texts' table to Parquet at '{texts_parquet_path}' ...")
+        # DuckDB does not support parameter binding for COPY target; embed the path directly
+        out_path_sql = str(texts_parquet_path).replace("'", "''")
        con.execute(
-            "COPY (SELECT * FROM texts) TO ? (FORMAT PARQUET)",
-            [str(texts_parquet_path)],
+            f"COPY (SELECT * FROM texts) TO '{out_path_sql}' (FORMAT PARQUET)"
        )
        logger.info(
            f"Wrote per-file dataset to {texts_parquet_path} (rows={total_inserted})"
 