gooddocs-v0 / filter.py
MRiabov's picture
(data WIP) filtered out 100k duplicates and unquality data
a99eadd
raw
history blame
2.02 kB
import duckdb
# Schema of texts.parquet:
# | column_name     | data_type |
# |-----------------|-----------|
# | owner           | VARCHAR   |
# | repo            | VARCHAR   |
# | repo_dir        | VARCHAR   |
# | file_rel_repo   | VARCHAR   |
# | file_rel_outdir | VARCHAR   |
# | size            | BIGINT    |
# (the query below also reads mtime, lang, content and filename — types not listed here)
if __name__ == "__main__":
    # Filtering conditions applied to the raw 'texts.parquet' dump:
    #  1. Drop versioned documentation folders.
    #     NOTE(review): the leading '/' here differs from the slash-less
    #     'docs/...' style used in the next condition — if file_rel_repo values
    #     never start with '/', this filter matches nothing. Confirm against data.
    #  2. For the 'appwrite' repo, drop docs/examples except the 1.7.x tree.
    conditions = [
        "file_rel_repo NOT LIKE '/docs/versioned_docs%'",
        "NOT (repo = 'appwrite' AND file_rel_repo LIKE 'docs/examples%' AND file_rel_repo NOT LIKE 'docs/examples/1.7.x/%')",
    ]
    conditions_str = " AND ".join(conditions)

    # Deduplication pipeline, built as CTEs for readability:
    #  Step 1: apply the row filters above.
    #  Step 2: within each (owner, repo, filename) group, rank rows newest-first
    #          by mtime so duplicates of the same filename can be collapsed.
    #  Step 3: keep only the newest row per filename group, except README.md
    #          files, which are all preserved (one per directory is expected).
    #          (Fix: 'LIKE' with no wildcard is just equality — use '='.)
    #  Step 4: rank exact-content duplicates deterministically and keep the
    #          first occurrence of each distinct content value.
    query = f"""
    -- Step 1: Filter the base data (filename column is precomputed)
    WITH filtered AS (
        SELECT *
        FROM 'texts.parquet'
        WHERE {conditions_str}
    ),
    -- Step 2: Rank by mtime within each (owner, repo, filename) group to identify latest versions
    ranked_mtime AS (
        SELECT *, ROW_NUMBER() OVER (PARTITION BY owner, repo, filename ORDER BY mtime DESC) as rn_mtime
        FROM filtered
    ),
    -- Step 3: Keep only the latest version for duplicates, but preserve all README.md files
    filtered_dup AS (
        SELECT * FROM ranked_mtime
        WHERE rn_mtime = 1 OR filename = 'README.md'
    ),
    -- Step 4: Rank by content to remove duplicate content, keeping the first occurrence
    ranked AS (
        SELECT *, ROW_NUMBER() OVER (PARTITION BY content ORDER BY owner, repo, file_rel_repo) as rn
        FROM filtered_dup
    )
    -- Final selection: unique content after filename deduplication
    SELECT owner, repo, repo_dir, file_rel_repo, file_rel_outdir, size, mtime, lang, content, filename
    FROM ranked
    WHERE rn = 1
    """
    rel = duckdb.sql(query)
    rel.to_parquet("gooddocs.parquet")
    # NOTE: rel.shape re-executes the (lazy) relation; acceptable for a one-shot script.
    print(f"Filtered documents saved to gooddocs.parquet ({rel.shape[0]} items)")