MRiabov committed on
Commit
972b153
·
1 Parent(s): 7a75841

(data) clean too short files, strip yaml front matter

Browse files
clean/clean_docs_using_content.py CHANGED
@@ -37,18 +37,28 @@ if __name__ == "__main__":
37
  -- Step 3: Keep only the latest version for duplicates, but preserve all README.md and Contributing.md files
38
  filtered_dup AS (
39
  SELECT * FROM ranked_mtime
40
- WHERE rn_mtime = 1 OR file_rel_repo LIKE 'README.md' OR filename ILIKE 'contributing.md'
41
  ),
42
- -- Step 4: Rank by content to remove duplicate content, keeping the first occurrence
43
- ranked AS (
44
- SELECT *, ROW_NUMBER() OVER (PARTITION BY content ORDER BY owner, repo, file_rel_repo) as rn
45
  FROM filtered_dup
 
 
 
 
 
 
 
 
 
 
46
  )
47
  -- Final selection: unique content after filename deduplication
48
- SELECT owner, repo, repo_dir, file_rel_repo, file_rel_outdir, size, mtime, lang, content
49
  FROM ranked
50
  WHERE rn = 1
51
- """
52
 
53
  rel = duckdb.sql(query)
54
  rel.to_parquet("gooddocs.parquet")
 
37
  -- Step 3: Keep only the latest version for duplicates, but preserve all README.md and Contributing.md files
38
  filtered_dup AS (
39
  SELECT * FROM ranked_mtime
40
+ WHERE rn_mtime = 1 OR file_rel_repo LIKE 'README.md' OR file_rel_repo ILIKE 'contributing.md'
41
  ),
42
+ -- Step 4: Remove YAML front matter and drop short documents
43
+ frontmatter_stripped AS (
44
+ SELECT *, REGEXP_REPLACE(content, '(?s)^---\\s*\n.*?\n---\\s*\n*', '') AS content_stripped
45
  FROM filtered_dup
46
+ ),
47
+ length_filtered AS (
48
+ SELECT *
49
+ FROM frontmatter_stripped
50
+ WHERE LENGTH(TRIM(content_stripped)) >= 150
51
+ ),
52
+ -- Step 5: Rank by cleaned content to remove duplicate content, keeping the first occurrence
53
+ ranked AS (
54
+ SELECT *, ROW_NUMBER() OVER (PARTITION BY content_stripped ORDER BY owner, repo, file_rel_repo) as rn
55
+ FROM length_filtered
56
  )
57
  -- Final selection: unique content after filename deduplication
58
+ SELECT owner, repo, repo_dir, file_rel_repo, file_rel_outdir, size, mtime, lang, content_stripped AS content
59
  FROM ranked
60
  WHERE rn = 1
61
+ """ #FIXME: contributing filter currently does nothing.
62
 
63
  rel = duckdb.sql(query)
64
  rel.to_parquet("gooddocs.parquet")
clean/clean_meta.yaml CHANGED
@@ -22,13 +22,13 @@ updated_after: null
22
  # Restrict to specific owners (orgs/users); empty means no include filter
23
  include_owners: []
24
  # Exclude these owners
25
- exclude_owners: []
26
  # Repo name filters (substring match, case-insensitive) over 'repo'
27
  include_repo_name_substrings: []
28
- exclude_repo_name_substrings: ["awesome", "cheatsheet", "resume", "books","days"]
29
  # Topic filters (substring match, case-insensitive) over comma-joined topics field
30
  include_topic_substrings: []
31
- exclude_topic_substrings: ["interview","interview-prep","learn","roadmap","chinese", "awesome", "cheatsheet", "chatgpt", "books"]
32
  #NOTE: also filter chatGPT label because many of those projects are dead.
33
  # Require presence of a README detected by fetch_gh_meta.py
34
  require_readme_found: true
 
22
  # Restrict to specific owners (orgs/users); empty means no include filter
23
  include_owners: []
24
  # Exclude these owners
25
+ exclude_owners: [appwrite]
26
  # Repo name filters (substring match, case-insensitive) over 'repo'
27
  include_repo_name_substrings: []
28
+ exclude_repo_name_substrings: ["awesome", "cheatsheet", "resume", "books","days","bootcamp"]
29
  # Topic filters (substring match, case-insensitive) over comma-joined topics field
30
  include_topic_substrings: []
31
+ exclude_topic_substrings: ["interview","interview-prep","learn","roadmap","chinese", "awesome", "cheatsheet", "chatgpt", "books", "bootcamp"]
32
  #NOTE: also filter chatGPT label because many of those projects are dead.
33
  # Require presence of a README detected by fetch_gh_meta.py
34
  require_readme_found: true