SunDou committed
Commit 90e19a0 · verified · 1 Parent(s): e10e693

Upload data1/download_dataset.py with huggingface_hub

Files changed (1)
  1. data1/download_dataset.py +69 -0
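The commit message says the file was uploaded with huggingface_hub. For context, a minimal sketch of such an upload, assuming a dataset repo; the repo id below is a placeholder, not taken from this page:

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="data1/download_dataset.py",  # local file to push
        path_in_repo="data1/download_dataset.py",     # destination path in the repo
        repo_id="<user>/<repo>",                      # placeholder: the actual repo id is not shown on this page
        repo_type="dataset",                          # assumption: this commit targets a dataset repo
        commit_message="Upload data1/download_dataset.py with huggingface_hub",
    )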
data1/download_dataset.py ADDED
@@ -0,0 +1,69 @@
+ import os
+ import pandas as pd
+ from datasets import load_dataset
+ from tqdm import tqdm
+
+ # Create the output directory
+ os.makedirs("./datasets", exist_ok=True)
+
+ # Dataset configurations to download
+ configs = ["codeparrot_github-code-chemistry-python-default", "starcoder-chemistry-default"]
+
+ all_dataframes = []
+
+ # Download and process each configuration
+ for config in tqdm(configs, desc="Processing configurations"):
+     print(f"\nLoading configuration: {config}")
+
+     # Load the dataset with all of its splits
+     dataset = load_dataset("jablonkagroup/chempile-code", config, cache_dir="./datasets")
+
+     # Process each split
+     for split_name, split_data in dataset.items():
+         # Convert to a DataFrame and tag each row with its origin
+         df = pd.DataFrame(split_data)
+         df["config"] = config  # Add configuration identifier
+         df["split"] = split_name  # Add split identifier
+         all_dataframes.append(df)
+         # Save the individual split as CSV
+         df.to_csv(f"./datasets/{config}_{split_name}.csv", index=False)
+
+ # Merge all data into one large DataFrame
+ print("\nMerging all data...")
+ merged_df = pd.concat(all_dataframes, ignore_index=True)
+
+ # Save the merged dataset
+ merged_output_path = "./datasets/chempile_code_complete.csv"
+ merged_df.to_csv(merged_output_path, index=False)
+
+ merged_df = pd.read_csv(merged_output_path)  # Reload the merged CSV from disk
+ all_data_path = "./datasets/all_chempile_code"
+ # Create the chunk output directory
+ os.makedirs(all_data_path, exist_ok=True)
+
+ # Re-save the merged data in roughly 500MB chunks
+ print("\nSaving in 500MB chunks...")
+ MAX_SIZE_MB = 500
+ chunk_num = 1
+ rows_per_chunk = 50000  # Initial estimate, adapted after each chunk
+
+ start_idx = 0
+ while start_idx < len(merged_df):
+     # Take the next slice of rows
+     end_idx = min(start_idx + rows_per_chunk, len(merged_df))
+     chunk_df = merged_df.iloc[start_idx:end_idx]
+
+     # Save the chunk and measure its actual size on disk
+     output_path = f"{all_data_path}/chempile_code_complete_{chunk_num:03d}.csv"
+     chunk_df.to_csv(output_path, index=False)
+     size_mb = os.path.getsize(output_path) / (1024 * 1024)
+
+     # Adjust rows_per_chunk for the next chunk from the measured size; max(1, ...) guards against a zero-row loop
+     if size_mb > 0:
+         rows_per_chunk = max(1, int(rows_per_chunk * (MAX_SIZE_MB / size_mb) * 0.95))
+
+     print(f"Saved {output_path}: {size_mb:.1f}MB, {len(chunk_df):,} rows")
+     start_idx = end_idx
+     chunk_num += 1
+
+ print(f"\nTotal: {len(merged_df):,} rows in {chunk_num - 1} files")
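To reuse the chunked output later, the pieces can be stitched back together with pandas. A minimal sketch, using the paths the script writes to:

    import glob

    import pandas as pd

    # Collect the numbered chunk files in order and concatenate them
    chunk_files = sorted(glob.glob("./datasets/all_chempile_code/chempile_code_complete_*.csv"))
    df = pd.concat((pd.read_csv(f) for f in chunk_files), ignore_index=True)
    print(f"Reloaded {len(df):,} rows from {len(chunk_files)} files")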