jerpint committed
Commit 25a9c33 · 1 Parent(s): f5f457e

remove dataloader

Files changed (1)
  1. create_splits.py +30 -160
create_splits.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import pprint
 import pandas as pd
 import torch
 from torch.utils.data import Dataset, DataLoader
@@ -8,7 +9,6 @@ from typing import List, Dict, Optional
 import numpy as np
 
 def create_dataset_splits(
-    data_path: str,
     metadata_path: str,
     output_dir: str,
     train_ratio: float = 0.8,
@@ -19,50 +19,30 @@ def create_dataset_splits(
     Create and save train/val/test splits to disk.
 
     Args:
-        data_path: Path to the JSON file containing voice mappings
         metadata_path: Path to the metadata CSV file
        output_dir: Directory to save the split CSV files
        train_ratio: Ratio of data to use for training
        val_ratio: Ratio of data to use for validation
        seed: Random seed for reproducibility
     """
-    # Load raw data
-    with open(data_path, 'r') as f:
-        raw_data = json.load(f)
 
-    df_metadata = pd.read_csv(metadata_path)
+    df = pd.read_csv(metadata_path)
 
-    # Convert to DataFrame
-    records = []
-    for path, available_models in raw_data.items():
-        df_metadata_row = df_metadata[df_metadata['path'] == path]
-        for model in available_models:
-            records.append({
-                'path': path,
-                'model': model,
-                'is_cloned_voice': model != 'commonvoice',
-                'age': df_metadata_row['age'].values[0],
-                'gender': df_metadata_row['gender'].values[0],
-                'accents': df_metadata_row['accents'].values[0],
-                'sentence': df_metadata_row['sentence'].values[0],
-            })
-
-    df = pd.DataFrame.from_records(records)
-
-    # Create deterministic split based on path
     np.random.seed(seed)
-    unique_paths = df['path'].unique()
-    np.random.shuffle(unique_paths)
 
-    n_samples = len(unique_paths)
+    # We will be splitting on the filename. This ensures that a cloned voice is always with the same original voice in a given split, and not split between train/val/test.
+    unique_filenames = df['filename'].unique()
+    np.random.shuffle(unique_filenames)
+
+    n_samples = len(unique_filenames)
     train_idx = int(n_samples * train_ratio)
     val_idx = int(n_samples * (train_ratio + val_ratio))
 
     # Create split DataFrames
     splits = {
-        'train': df[df['path'].isin(unique_paths[:train_idx])],
-        'val': df[df['path'].isin(unique_paths[train_idx:val_idx])],
-        'test': df[df['path'].isin(unique_paths[val_idx:])]
+        'train': df[df['filename'].isin(unique_filenames[:train_idx])],
+        'val': df[df['filename'].isin(unique_filenames[train_idx:val_idx])],
+        'test': df[df['filename'].isin(unique_filenames[val_idx:])]
     }
 
     # Save splits
@@ -74,145 +54,35 @@ def create_dataset_splits(
         split_df.to_csv(output_dir / f'{split_name}.csv', index=False)
 
     # Save split info
-    split_info = {
-        'num_samples': {
-            split_name: len(paths)
-            for split_name, paths in {
-                'train': unique_paths[:train_idx],
-                'val': unique_paths[train_idx:val_idx],
-                'test': unique_paths[val_idx:]
-            }.items()
-        },
-        'train_ratio': train_ratio,
-        'val_ratio': val_ratio,
-        'seed': seed
+    split_info = {}
+    split_info['metadata_path'] = metadata_path
+    split_info['seed'] = seed
+    split_info['ratios'] = {
+        'train': train_ratio,
+        'val': val_ratio,
+        'test': round(1 - train_ratio - val_ratio, 2),
     }
-
-    with open(output_dir / 'split_info.json', 'w') as f:
-        json.dump(split_info, f, indent=2)
-
-class VoiceDataset(Dataset):
-    def __init__(
-        self,
-        split_path: str,
-        clips_dir: str,
-        models: Optional[List[str]] = None
-    ):
-        """
-        Args:
-            split_path: Path to the CSV file containing the split data
-            models: List of model names to include. If None, includes all models
-        """
-        self.data = pd.read_csv(split_path)
-        self.clips_dir = clips_dir
-
-        # Filter models if specified
-        if models is not None:
-            self.data = self.data[self.data['model'].isin(models)]
-
-        # Create path to index mapping
-        self.path_to_idx = {path: idx for idx, path in enumerate(self.data['path'].unique())}
-        self.split_name = Path(split_path).stem  # Get split name from file path
-        self.original_models = [model for model in self.data['model'].unique() if model != 'commonvoice']
-
-
-
-    def __len__(self):
-        return len(self.data)
-
-    def __getitem__(self, idx):
-        row = self.data.iloc[idx]
-        path = row['path']
-        model = row['model']
-        rel_path = os.path.join(model, path)
-        abs_path = os.path.join(self.clips_dir, rel_path)
-        return {
-            'path': abs_path,
-            'model': model,
-            'is_cloned_voice': row['is_cloned_voice'],
-        }
-
-    def summary(self) -> Dict:
-        """
-        Generate a comprehensive summary of the dataset.
-
-        Returns:
-            Dictionary containing summary statistics
-        """
-        summary = {
-            'split': self.split_name,
-            'total_samples': len(self.data),
-            'cloned_samples': len(self.data[self.data['is_cloned_voice']]),
-            'original_samples': len(self.data[~self.data['is_cloned_voice']]),
-            'unique_voices': len(self.path_to_idx),
-            'models': {
-                'available': list(self.original_models),
-                'selected': list(self.data['model'].unique()),
-            },
-            'samples_per_model': self.data['model'].value_counts().to_dict(),
-            'voices_per_model': self.data.groupby('model')['path'].nunique().to_dict(),
+    for split_name, split_df in splits.items():
+        split_info[split_name] = {
+            'total_num_samples': len(split_df),
+            'original_samples': len(split_df[split_df['is_cloned_voice'] == False]),
+            'cloned_samples': len(split_df[split_df['is_cloned_voice'] == True]),
+            'sources': split_df['source'].value_counts().to_dict(),
+            'voices_per_source': split_df.groupby('source')['path'].nunique().to_dict(),
         }
 
-        return summary
-
-
-    def print_summary(self):
-        """
-        Print a formatted summary of the dataset.
-
-        Args:
-            include_metadata: Whether to include metadata statistics in the summary
-        """
-        summary = self.summary()
-
-        print(f"\n=== Dataset Summary ({summary['split'].upper()} split) ===")
-        print(f"Total samples: {summary['total_samples']}")
-        print(f"Cloned samples: {summary['cloned_samples']}")
-        print(f"Original samples: {summary['original_samples']}")
-
-        print("\nModels:")
-        print(f"Available: {', '.join(summary['models']['available'])}")
-        print(f"Selected: {', '.join(summary['models']['selected'])}")
-
-        print("\nSamples per model:")
-        for model, count in summary['samples_per_model'].items():
-            print(f" {model}: {count}")
-
-        print("\nUnique voices per model:")
-        for model, count in summary['voices_per_model'].items():
-            print(f" {model}: {count}")
+    pprint.pprint(split_info)
 
+    with open(output_dir / 'split_info.json', 'w') as f:
+        json.dump(split_info, f, indent=2)
 
 
 # Example usage:
 if __name__ == "__main__":
-    json_file = 'files.json'
-    metadata_file = 'metadata-balanced.csv'
+    # json_file = 'files.json'
+    metadata_file = 'metadata-valid.csv'
     clips_dir = '.'
    output_dir = 'splits'
 
     # Create splits
-    create_dataset_splits(json_file, metadata_file, output_dir=output_dir)
-
-
-    # Create datasets for each split
-    train_dataset = VoiceDataset(
-        'splits/train.csv',
-        clips_dir=clips_dir,
-        models=['commonvoice', 'metavoice', 'xttsv2']
-    )
-
-    val_dataset = VoiceDataset(
-        'splits/val.csv',
-        clips_dir=clips_dir,
-        models=['commonvoice', 'metavoice', 'xttsv2']
-    )
-
-    test_dataset = VoiceDataset(
-        'splits/test.csv',
-        clips_dir=clips_dir,
-        models=['commonvoice', 'metavoice', 'xttsv2']
-    )
-
-    # Create DataLoader
-    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
+    create_dataset_splits(metadata_file, output_dir=output_dir)
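With the VoiceDataset / DataLoader wrapper removed by this commit, downstream code would read the split CSVs written by create_dataset_splits directly. A minimal sketch, assuming the CSVs keep the filename, path, source, and is_cloned_voice columns referenced in the new split_info code; the splits/ paths follow the example usage above, and the loop body is purely illustrative rather than part of this repo:

import pandas as pd

# Load two of the splits written by create_dataset_splits.
train_df = pd.read_csv('splits/train.csv')
val_df = pd.read_csv('splits/val.csv')

# Sanity-check the property described in the new comment: a filename
# (an original voice plus its clones) never appears in more than one split.
leaked = set(train_df['filename']) & set(val_df['filename'])
assert not leaked, f"filenames present in both train and val: {leaked}"

# Iterate over clips and their labels without a Dataset/DataLoader wrapper.
for _, row in train_df.iterrows():
    clip_path = row['path']                    # path to the audio clip
    is_cloned = bool(row['is_cloned_voice'])   # True for cloned voices
    # ... load the audio at clip_path and hand (clip_path, is_cloned) to a model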