import duckdb
if __name__ == "__main__":
# Set source file: 'texts.parquet' or 'gooddocs.parquet'
source_file = "gooddocs.parquet"
# Query to filter repos with more than 100 docs and fetch their README.md
query = f"""
WITH high_doc_repos AS (
SELECT owner, repo, COUNT(*) as doc_count
FROM '{source_file}'
WHERE file_rel_repo LIKE '%.md'
GROUP BY owner, repo
HAVING COUNT(*) > 100
)
SELECT t.owner, t.repo, t.file_rel_repo, t.content, h.doc_count
FROM '{source_file}' t
JOIN high_doc_repos h ON t.owner = h.owner AND t.repo = h.repo
WHERE t.file_rel_repo = 'README.md'
"""
readmes = duckdb.sql(query)
readmes.to_parquet("readmes_filtered.parquet")
print(
f"Filtered README.md files saved to readmes_filtered.parquet ({readmes.shape[0]} items)"
)
# Second query for repos with >500 docs
query2 = f"""
WITH high_doc_repos AS (
SELECT owner, repo, COUNT(*) as doc_count
FROM '{source_file}'
WHERE file_rel_repo LIKE '%.md'
GROUP BY owner, repo
HAVING COUNT(*) > 500
)
SELECT h.owner, h.repo, h.doc_count
FROM high_doc_repos h
"""
repos = duckdb.sql(query2)
repos.to_parquet("repos_over_500.parquet")
print(
f"Repos with >500 docs saved to repos_over_500.parquet ({repos.shape[0]} items)"
)
# Third query for non-unique doc names per repo
query3 = f"""
WITH filename_table AS (
SELECT owner, repo, filename
FROM '{source_file}'
WHERE file_rel_repo LIKE '%.md'
),
filename_counts AS (
SELECT owner, repo, filename, COUNT(*) as dupe_count
FROM filename_table
GROUP BY owner, repo, filename
HAVING COUNT(*) > 1
)
SELECT owner, repo, SUM(dupe_count) as total_non_unique_docs
FROM filename_counts
GROUP BY owner, repo
"""
non_unique = duckdb.sql(query3)
non_unique.to_parquet("non_unique_docs_count.parquet")
print(
f"Non-unique doc counts per repo saved to non_unique_docs_count.parquet ({non_unique.shape[0]} items)"
)