MRiabov committed
Commit 660d674 · 1 Parent(s): 3356c03

(debug) wrong source file changed.

README.md CHANGED
@@ -22,7 +22,7 @@ GoodDocs-v0 is a text dataset scraped from high-quality documentation sources in
 
 ## What's in this repository
 
-- `texts.parquet` — per-file Markdown documents and metadata extracted from documentation trees.
+- `cleaned_texts_on_metadata_only.parquet` — per-file Markdown documents and metadata extracted from documentation trees.
 - `awesome-repos.parquet` — structured links extracted from Awesome lists-of-lists (`name`, `link`, `description`, `source_repo`, optional `stars`).
 - `data_collection_utils/` — utilities to regenerate the dataset:
   - `scrape_gh_docs.py` — main scraper/collector for documentation from GitHub repositories.
@@ -34,7 +34,7 @@ GoodDocs-v0 is a text dataset scraped from high-quality documentation sources in
 
 ## Schema
 
-texts.parquet — one row per Markdown file (see `md_rows` assembly in `main()`):
+cleaned_texts_on_metadata_only.parquet — one row per Markdown file (see `md_rows` assembly in `main()`):
 
 - `owner`, `repo`, `repo_dir`
 - `file_rel_repo` — path relative to the saved repo root
@@ -50,7 +50,7 @@ Load the dataset with pandas:
 
 ```python
 import pandas as pd
-df = pd.read_parquet("texts.parquet")
+df = pd.read_parquet("cleaned_texts_on_metadata_only.parquet")
 print(len(df), "rows")
 print(df.columns.tolist())
 ```
@@ -90,9 +90,9 @@ Configuration (YAML-driven; see `data_collection_utils/scrape_gh_docs_config.yam
 - `workers`, `dry_run`, `quiet`, `no_fetch`
 - `token_file` — GitHub token location (or set `GITHUB_TOKEN` env var)
 - `prefer_sparse`, `prefer_zip`, `only_md`, `min_repo_age_years`
-- `lang_filter`, `min_text_chars` — control language gating in `texts.parquet`
+- `lang_filter`, `min_text_chars` — control language gating in `cleaned_texts_on_metadata_only.parquet`
 
-Output is written to `<outdir>/texts.parquet`.
+Output is written to `<outdir>/cleaned_texts_on_metadata_only.parquet`.
 
 ## Awesome list extraction
 
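This commit only renames the scraper's Parquet output, so anything still reading the old filename breaks until the remaining hunks below land. A minimal transition-tolerant loader, as a sketch: it assumes either filename may be present depending on which scraper version produced `output/`, and the `resolve_texts_parquet` helper is hypothetical, not part of the repo.

```python
from pathlib import Path

import pandas as pd


def resolve_texts_parquet(outdir: str = "output") -> Path:
    # Hypothetical helper, not part of the repo: pick whichever output file
    # exists, preferring the new name introduced by this commit.
    for name in ("cleaned_texts_on_metadata_only.parquet", "texts.parquet"):
        candidate = Path(outdir) / name
        if candidate.exists():
            return candidate
    raise FileNotFoundError(f"no texts Parquet found under {outdir!r}")


df = pd.read_parquet(resolve_texts_parquet())
print(len(df), "rows")
print(df.columns.tolist())
```

Preferring the new name means a freshly regenerated output wins over a stale `texts.parquet` left behind by an older run.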
clean/clean_docs_using_content.py CHANGED
@@ -26,7 +26,7 @@ if __name__ == "__main__":
     -- Step 1: Filter the base data (filename column is precomputed)
     WITH filtered AS (
         SELECT *
-        FROM 'output/texts.parquet'
+        FROM 'output/cleaned_texts_on_metadata_only.parquet'
         WHERE {conditions_str}
     ),
     -- Step 2: Rank by mtime within each (owner, repo, filename) group to identify latest versions
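The hunk sits inside a two-step DuckDB query: Step 1 filters rows from the source Parquet via `{conditions_str}`, and Step 2 window-ranks each `(owner, repo, filename)` group by `mtime` so only the newest copy of a duplicated document survives. A standalone sketch of that filter-then-rank pattern, assuming the column names mentioned in the comments; the filter condition here is illustrative, since the script's real `conditions_str` is assembled outside this hunk.

```python
import duckdb

# Illustrative stand-in for the script's {conditions_str}; the real predicates
# are built elsewhere in clean_docs_using_content.py.
conditions_str = "filename LIKE '%.md'"

query = f"""
WITH filtered AS (
    SELECT *
    FROM 'output/cleaned_texts_on_metadata_only.parquet'
    WHERE {conditions_str}
),
ranked AS (
    SELECT *,
           ROW_NUMBER() OVER (
               PARTITION BY owner, repo, filename
               ORDER BY mtime DESC
           ) AS rn
    FROM filtered
)
-- Keep only the latest version within each (owner, repo, filename) group.
SELECT * EXCLUDE (rn) FROM ranked WHERE rn = 1
"""
df = duckdb.sql(query).df()
print(len(df), "rows after dedup")
```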
clean/data_stats.py CHANGED
@@ -1,8 +1,8 @@
 import duckdb
 
 if __name__ == "__main__":
-    # Set source file: 'texts.parquet' or 'gooddocs.parquet'
-    source_file = "output/texts.parquet"
+    # Set source file: 'cleaned_texts_on_metadata_only.parquet' or 'gooddocs.parquet'
+    source_file = "output/cleaned_texts_on_metadata_only.parquet"
 
     # Query to filter repos with more than 100 docs and fetch their README.md
     query = f"""
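The query body itself falls outside this hunk; going by the comment, it selects repos with more than 100 docs and pulls their `README.md`. A sketch of one way to express that in DuckDB, assuming the `filename` column precomputed by `clean_docs_using_content.py` and the `owner`/`repo`/`file_rel_repo` columns from the README schema:

```python
import duckdb

source_file = "output/cleaned_texts_on_metadata_only.parquet"

# Sketch only; the real query in data_stats.py is not shown in this diff.
query = f"""
WITH big_repos AS (
    SELECT owner, repo
    FROM '{source_file}'
    GROUP BY owner, repo
    HAVING COUNT(*) > 100
)
SELECT t.owner, t.repo, t.file_rel_repo
FROM '{source_file}' t
JOIN big_repos USING (owner, repo)
WHERE t.filename = 'README.md'
"""
print(duckdb.sql(query).df())
```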
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -25,7 +25,7 @@ Typical usage:
 
 Outputs:
 - Saves files under `<outdir>/<owner>__<repo>/...`.
-- Writes a per-file Parquet to `<outdir>/texts.parquet`.
+- Writes a per-file Parquet to `<outdir>/cleaned_texts_on_metadata_only.parquet`.
 - Appends repos with <10 .md files to `md-failed.txt`.
 """
 
@@ -1147,7 +1147,7 @@ def main():
 
     # Export texts from DuckDB to Parquet
     texts_parquet_path = (
-        Path(texts_parquet_value) if texts_parquet_value else (outdir / "texts.parquet")
+        Path(texts_parquet_value) if texts_parquet_value else (outdir / "cleaned_texts_on_metadata_only.parquet")
     )
     try:
         texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
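Only the path resolution is visible in this hunk; the export itself happens in the `try:` block below it. A sketch of the whole step, assuming an in-memory DuckDB connection and a `texts` table name, neither of which is shown in the diff:

```python
from pathlib import Path

import duckdb

con = duckdb.connect()  # assumed in-memory connection; the script's own is not shown
# Stand-in data so the export below has something to write.
con.execute("CREATE TABLE texts AS SELECT 'demo' AS owner, 'repo' AS repo")

texts_parquet_value = None  # e.g. taken from the YAML config; None falls back to the default
outdir = Path("output")

texts_parquet_path = (
    Path(texts_parquet_value)
    if texts_parquet_value
    else (outdir / "cleaned_texts_on_metadata_only.parquet")
)
texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
# DuckDB writes Parquet directly via COPY.
con.execute(f"COPY texts TO '{texts_parquet_path}' (FORMAT PARQUET)")
```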