(debug) wrong source file changed.
- README.md +5 -5
- clean/clean_docs_using_content.py +1 -1
- clean/data_stats.py +2 -2
- data_collection_utils/scrape_gh_docs.py +2 -2
README.md
CHANGED
@@ -22,7 +22,7 @@ GoodDocs-v0 is a text dataset scraped from high-quality documentation sources in

 ## What's in this repository

-- `
+- `cleaned_texts_on_metadata_only.parquet` – per-file Markdown documents and metadata extracted from documentation trees.
 - `awesome-repos.parquet` – structured links extracted from Awesome lists-of-lists (`name`, `link`, `description`, `source_repo`, optional `stars`).
 - `data_collection_utils/` – utilities to regenerate the dataset:
   - `scrape_gh_docs.py` – main scraper/collector for documentation from GitHub repositories.

@@ -34,7 +34,7 @@ GoodDocs-v0 is a text dataset scraped from high-quality documentation sources in

 ## Schema

-
+cleaned_texts_on_metadata_only.parquet – one row per Markdown file (see `md_rows` assembly in `main()`):

 - `owner`, `repo`, `repo_dir`
 - `file_rel_repo` – path relative to the saved repo root

@@ -50,7 +50,7 @@ Load the dataset with pandas:

 ```python
 import pandas as pd
-df = pd.read_parquet("
+df = pd.read_parquet("cleaned_texts_on_metadata_only.parquet")
 print(len(df), "rows")
 print(df.columns.tolist())
 ```

@@ -90,9 +90,9 @@ Configuration (YAML-driven; see `data_collection_utils/scrape_gh_docs_config.yam
 - `workers`, `dry_run`, `quiet`, `no_fetch`
 - `token_file` – GitHub token location (or set `GITHUB_TOKEN` env var)
 - `prefer_sparse`, `prefer_zip`, `only_md`, `min_repo_age_years`
-- `lang_filter`, `min_text_chars` – control language gating in `
+- `lang_filter`, `min_text_chars` – control language gating in `cleaned_texts_on_metadata_only.parquet`

-Output is written to `<outdir>/
+Output is written to `<outdir>/cleaned_texts_on_metadata_only.parquet`.

 ## Awesome list extraction

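Since the renamed Parquet is also what the `clean/` scripts query with DuckDB, here is a minimal sketch of reading it that way. The column names come from the README's Schema section above; the `output/` path mirrors the cleaning scripts and is otherwise an assumption.

```python
import duckdb

# Sketch only: assumes the columns listed in the README schema and the
# output/ location used by the clean/ scripts.
con = duckdb.connect()
preview = con.execute(
    """
    SELECT owner, repo, file_rel_repo
    FROM 'output/cleaned_texts_on_metadata_only.parquet'
    LIMIT 5
    """
).fetchdf()
print(preview)
```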
clean/clean_docs_using_content.py
CHANGED
@@ -26,7 +26,7 @@ if __name__ == "__main__":
 -- Step 1: Filter the base data (filename column is precomputed)
 WITH filtered AS (
 SELECT *
-FROM 'output/
+FROM 'output/cleaned_texts_on_metadata_only.parquet'
 WHERE {conditions_str}
 ),
 -- Step 2: Rank by mtime within each (owner, repo, filename) group to identify latest versions
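The comments in this hunk describe a filter step followed by a ranking step. Below is a hypothetical sketch of that "Step 2" de-duplication; the `mtime` ordering and the `rn` alias follow the comment's wording but are assumptions, not the script's actual SQL.

```python
import duckdb

# Hypothetical de-duplication sketch: keep the latest version per
# (owner, repo, filename) group, ordered by mtime as the comment describes.
query = """
WITH filtered AS (
    SELECT *
    FROM 'output/cleaned_texts_on_metadata_only.parquet'
),
ranked AS (
    SELECT *,
           ROW_NUMBER() OVER (
               PARTITION BY owner, repo, filename
               ORDER BY mtime DESC
           ) AS rn
    FROM filtered
)
SELECT * FROM ranked WHERE rn = 1
"""
latest = duckdb.sql(query).fetchdf()
print(len(latest), "rows after keeping the latest version per file")
```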
clean/data_stats.py
CHANGED
@@ -1,8 +1,8 @@
 import duckdb

 if __name__ == "__main__":
-    # Set source file: '
-    source_file = "output/
+    # Set source file: 'cleaned_texts_on_metadata_only.parquet' or 'gooddocs.parquet'
+    source_file = "output/cleaned_texts_on_metadata_only.parquet"

     # Query to filter repos with more than 100 docs and fetch their README.md
     query = f"""
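For context, a rough sketch of the kind of query the comment describes (repos with more than 100 docs). The actual SQL in `data_stats.py` is not shown in this diff, so the aggregation and column names here are assumptions based on the README schema.

```python
import duckdb

source_file = "output/cleaned_texts_on_metadata_only.parquet"

# Assumed shape of the stats query: count documents per repo and keep
# repos with more than 100 of them.
query = f"""
SELECT owner, repo, COUNT(*) AS n_docs
FROM '{source_file}'
GROUP BY owner, repo
HAVING COUNT(*) > 100
ORDER BY n_docs DESC
"""
print(duckdb.sql(query).fetchdf())
```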
data_collection_utils/scrape_gh_docs.py
CHANGED
@@ -25,7 +25,7 @@ Typical usage:

 Outputs:
 - Saves files under `<outdir>/<owner>__<repo>/...`.
-- Writes a per-file Parquet to `<outdir>/
+- Writes a per-file Parquet to `<outdir>/cleaned_texts_on_metadata_only.parquet`.
 - Appends repos with <10 .md files to `md-failed.txt`.
 """


@@ -1147,7 +1147,7 @@ def main():

     # Export texts from DuckDB to Parquet
     texts_parquet_path = (
-        Path(texts_parquet_value) if texts_parquet_value else (outdir / "
+        Path(texts_parquet_value) if texts_parquet_value else (outdir / "cleaned_texts_on_metadata_only.parquet")
     )
     try:
         texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)