# gooddocs-v0 / data_util.py
import duckdb

if __name__ == "__main__":
    # Source file: 'texts.parquet' or 'gooddocs.parquet'
    source_file = "gooddocs.parquet"

    # Query 1: find repos with more than 100 Markdown docs and fetch each
    # one's top-level README.md along with its doc count.
    query = f"""
        WITH high_doc_repos AS (
            SELECT owner, repo, COUNT(*) AS doc_count
            FROM '{source_file}'
            WHERE file_rel_repo LIKE '%.md'
            GROUP BY owner, repo
            HAVING COUNT(*) > 100
        )
        SELECT t.owner, t.repo, t.file_rel_repo, t.content, h.doc_count
        FROM '{source_file}' t
        JOIN high_doc_repos h ON t.owner = h.owner AND t.repo = h.repo
        WHERE t.file_rel_repo = 'README.md'
    """
    readmes = duckdb.sql(query)
    readmes.to_parquet("readmes_filtered.parquet")
    print(
        f"Filtered README.md files saved to readmes_filtered.parquet ({readmes.shape[0]} items)"
    )
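
    # Optional sanity check (an addition, not in the original script): preview
    # a few rows to confirm the join returned README content as expected.
    print(readmes.limit(5))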
    # Query 2: list repos with more than 500 Markdown docs.
    query2 = f"""
        WITH high_doc_repos AS (
            SELECT owner, repo, COUNT(*) AS doc_count
            FROM '{source_file}'
            WHERE file_rel_repo LIKE '%.md'
            GROUP BY owner, repo
            HAVING COUNT(*) > 500
        )
        SELECT h.owner, h.repo, h.doc_count
        FROM high_doc_repos h
    """
    repos = duckdb.sql(query2)
    repos.to_parquet("repos_over_500.parquet")
    print(
        f"Repos with >500 docs saved to repos_over_500.parquet ({repos.shape[0]} items)"
    )
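
    # Hedged follow-up (an addition, not part of the original pipeline): a
    # quick summary of the doc_count distribution across the flagged repos,
    # useful when tuning the >500 cutoff. Uses DuckDB's relational API.
    print(repos.aggregate("min(doc_count), max(doc_count), avg(doc_count)"))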
    # Query 3: count non-unique doc names per repo. Note: 'filename' is
    # assumed to be a separate base-name column in the source parquet;
    # the other queries use the 'file_rel_repo' path column instead.
    query3 = f"""
        WITH filename_table AS (
            SELECT owner, repo, filename
            FROM '{source_file}'
            WHERE file_rel_repo LIKE '%.md'
        ),
        filename_counts AS (
            SELECT owner, repo, filename, COUNT(*) AS dupe_count
            FROM filename_table
            GROUP BY owner, repo, filename
            HAVING COUNT(*) > 1
        )
        SELECT owner, repo, SUM(dupe_count) AS total_non_unique_docs
        FROM filename_counts
        GROUP BY owner, repo
    """
    non_unique = duckdb.sql(query3)
    non_unique.to_parquet("non_unique_docs_count.parquet")
    print(
        f"Non-unique doc counts per repo saved to non_unique_docs_count.parquet ({non_unique.shape[0]} items)"
    )
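
    # A minimal dedup sketch (an assumption, not the original filtering
    # method): keep one row per (owner, repo, filename), which would drop the
    # duplicates counted above. As in query 3, 'filename' is assumed to be a
    # column in the source parquet.
    dedup_query = f"""
        SELECT * EXCLUDE (rn)
        FROM (
            SELECT *, ROW_NUMBER() OVER (
                PARTITION BY owner, repo, filename
                ORDER BY file_rel_repo
            ) AS rn
            FROM '{source_file}'
        )
        WHERE rn = 1
    """
    # Uncomment to materialize the deduplicated dataset:
    # duckdb.sql(dedup_query).to_parquet("deduped.parquet")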