import os
import random
import shutil
import tempfile
import zipfile
from typing import List, Optional

import datasets
import fsspec
import numpy as np
import zarr
from datasets import DownloadManager, DatasetInfo, Features, Split, SplitGenerator, Value


class TestDownloadManager(datasets.DownloadManager):
    """Custom download manager that handles zarr chunks in zip format for streaming."""
    
    def __init__(self, dataset_name: str = "test", cache_dir: Optional[str] = None):
        # Initialize the parent without a cache_dir argument, since not every
        # datasets release accepts one here
        super().__init__()
        self.dataset_name = dataset_name
        # Set cache_dir manually if provided
        if cache_dir:
            self.cache_dir = cache_dir
        elif not hasattr(self, 'cache_dir') or self.cache_dir is None:
            # Fall back to the system temporary directory
            self.cache_dir = tempfile.gettempdir()
        
    def download_zarr_chunks(self, traces_path: str, chunk_size: int = 100) -> str:
        """
        Convert traces.npy to zarr format with chunks and store in zip file.
        Returns path to the zip file containing zarr chunks.
        """
        # Load the original traces data
        traces = np.load(traces_path)
        
        # Create temporary directory for zarr store
        temp_dir = tempfile.mkdtemp()
        zarr_path = os.path.join(temp_dir, "traces.zarr")
        zip_path = os.path.join(temp_dir, "traces_zarr.zip")
        
        # Create a zarr array chunked along the example (first) axis
        chunks = (chunk_size, traces.shape[1])
        zarr_array = zarr.open(zarr_path, mode='w', shape=traces.shape,
                               chunks=chunks, dtype=traces.dtype)
        
        # Write data in chunks
        for i in range(0, traces.shape[0], chunk_size):
            end_idx = min(i + chunk_size, traces.shape[0])
            zarr_array[i:end_idx] = traces[i:end_idx]
        
        # Create zip file with zarr store - include the zarr directory structure
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, dirs, files in os.walk(zarr_path):
                for file in files:
                    file_path = os.path.join(root, file)
                    # Keep the zarr directory structure in the zip
                    arcname = os.path.relpath(file_path, temp_dir)
                    zipf.write(file_path, arcname)
        
        # Move to cache directory
        cache_path = os.path.join(self.cache_dir, f"{self.dataset_name}_traces_zarr.zip")
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)
        
        # Copy to cache if not exists or if source is newer
        if not os.path.exists(cache_path) or os.path.getmtime(zip_path) > os.path.getmtime(cache_path):
            shutil.copy2(zip_path, cache_path)
        
        return cache_path
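

# A minimal usage sketch, assuming the layout produced by download_zarr_chunks
# above: open the zipped zarr store in place and read one slice, so that only
# the chunks overlapping the slice are decompressed.
def read_traces_window(zip_path: str, start: int, stop: int) -> np.ndarray:
    """Read traces[start:stop] straight from the zip, without extracting it."""
    fs = fsspec.filesystem('zip', fo=zip_path)
    arr = zarr.open(fs.get_mapper('traces.zarr'), mode='r')
    return arr[start:stop]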


class TestDataset(datasets.GeneratorBasedBuilder):
    """Custom dataset for DLSCA test data with streaming zarr support."""
    
    VERSION = datasets.Version("1.0.0")
    
    def _info(self) -> DatasetInfo:
        """Define the dataset information and features."""
        return DatasetInfo(
            description="DLSCA test dataset with streaming support for large traces",
            features=Features({
                "labels": datasets.Sequence(datasets.Value("int32"), length=4),
                "traces": datasets.Sequence(datasets.Value("int8"), length=20971),
                "index": Value("int32"),
            }),
            supervised_keys=("traces", "labels"),
            homepage="https://huggingface.co/datasets/DLSCA/test",
        )
    
    def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
        """Define the data splits."""
        # Resolve paths relative to this script; labels are always read from
        # the local .npy file
        data_dir = os.path.join(os.path.dirname(__file__), "data")
        labels_path = os.path.join(data_dir, "labels.npy")
        traces_path = os.path.join(data_dir, "traces.npy")

        if isinstance(dl_manager, TestDownloadManager):
            # Convert the traces to chunked zarr and cache them as a zip
            # for streaming access
            zarr_zip_path = dl_manager.download_zarr_chunks(traces_path)
        else:
            # Local development: read the traces directly from the .npy file
            zarr_zip_path = None
        
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "labels_path": labels_path,
                    "traces_path": traces_path,
                    "zarr_zip_path": zarr_zip_path,
                },
            ),
        ]
    
    def _generate_examples(self, labels_path: str, traces_path: str, zarr_zip_path: Optional[str] = None):
        """Generate examples from the dataset."""
        # Load labels (small file, can load entirely)
        labels = np.load(labels_path)
        
        if zarr_zip_path and os.path.exists(zarr_zip_path):
            # Use zarr from zip for streaming access
            traces_array = self._load_zarr_from_zip(zarr_zip_path)
        else:
            # Fallback to numpy array for local development
            traces_array = np.load(traces_path)
        
        # Generate examples
        for idx in range(len(labels)):
            yield idx, {
                "labels": labels[idx],
                "traces": traces_array[idx] if zarr_zip_path else traces_array[idx],
                "index": idx,
            }
    
    def _load_zarr_from_zip(self, zip_path: str) -> zarr.Array:
        """Load zarr array from zip file with streaming support."""
        # Create a filesystem that can read from zip
        fs = fsspec.filesystem('zip', fo=zip_path)
        
        # Open zarr array through the zip filesystem
        mapper = fs.get_mapper('traces.zarr')
        zarr_array = zarr.open(mapper, mode='r')
        
        return zarr_array
    
    def _get_chunk_indices(self, start_idx: int, end_idx: int, chunk_size: int = 100) -> List[tuple]:
        """Return chunk-aligned (start, end) ranges covering [start_idx, end_idx).

        The first range is aligned down to a chunk boundary so that every read
        maps onto whole zarr chunks.
        """
        chunks = []
        current_idx = start_idx
        while current_idx < end_idx:
            chunk_start = (current_idx // chunk_size) * chunk_size
            chunk_end = min(chunk_start + chunk_size, end_idx)
            chunks.append((chunk_start, chunk_end))
            current_idx = chunk_end
        return chunks
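
    # A sketch of chunk-aligned iteration built on _get_chunk_indices, assuming
    # a zarr array chunked with the same chunk_size: each slice read maps onto
    # whole chunks, so every chunk is decompressed at most once per pass.
    def iter_chunk_batches(self, zarr_array: zarr.Array, chunk_size: int = 100):
        """Yield (start, batch) pairs, one contiguous chunk at a time."""
        for start, end in self._get_chunk_indices(0, zarr_array.shape[0], chunk_size):
            yield start, zarr_array[start:end]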


# Utility functions for dataset usage
def get_dataset_info():
    """Get information about the dataset."""
    info = {
        "description": "DLSCA test dataset with streaming support",
        "total_examples": 1000,
        "features": {
            "labels": {"shape": (4,), "dtype": "int32"},
            "traces": {"shape": (20971,), "dtype": "int8"},
            "index": {"dtype": "int32"}
        },
        "splits": ["train"],
        "size_info": {
            "labels_file": "~16KB",
            "traces_file": "~20MB", 
            "zarr_chunks": "10 chunks of 100 examples each"
        }
    }
    return info


def create_data_loader(zarr_zip_path: str, batch_size: int = 32, shuffle: bool = True):
    """Create a data loader for the zarr dataset."""
    dataset = TestDataset()
    zarr_array = dataset._load_zarr_from_zip(zarr_zip_path)
    labels = np.load(os.path.join(os.path.dirname(__file__), "data", "labels.npy"))
    
    # Simple batch generator
    def batch_generator():
        indices = list(range(len(labels)))
        if shuffle:
            random.shuffle(indices)
        
        for i in range(0, len(indices), batch_size):
            batch_indices = indices[i:i+batch_size]
            # Fancy indexing on the zarr array decompresses only the chunks
            # that contain the requested rows
            batch_traces = zarr_array[batch_indices]
            batch_labels = labels[batch_indices]
            yield {
                "traces": batch_traces,
                "labels": batch_labels,
                "indices": batch_indices
            }
    
    return batch_generator
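

# A small driver sketch, assuming the zip layout produced above: consume one
# epoch of batches from the zarr-backed loader and count the examples seen.
def run_one_epoch(zarr_zip_path: str, batch_size: int = 32) -> int:
    """Iterate one epoch of batches and return the number of examples seen."""
    batch_gen = create_data_loader(zarr_zip_path, batch_size=batch_size, shuffle=False)
    seen = 0
    for batch in batch_gen():
        seen += len(batch["indices"])
    return seen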


def validate_dataset_integrity():
    """Validate that zarr conversion preserves data integrity."""
    # Load original data
    original_traces = np.load(os.path.join(os.path.dirname(__file__), "data", "traces.npy"))
    original_labels = np.load(os.path.join(os.path.dirname(__file__), "data", "labels.npy"))
    
    # Convert to zarr and load back
    dl_manager = TestDownloadManager()
    traces_path = os.path.join(os.path.dirname(__file__), "data", "traces.npy")
    zarr_zip_path = dl_manager.download_zarr_chunks(traces_path)
    
    dataset = TestDataset()
    zarr_traces = dataset._load_zarr_from_zip(zarr_zip_path)
    
    # Validate
    traces_match = np.array_equal(original_traces, zarr_traces[:])
    shapes_match = original_traces.shape == zarr_traces.shape
    dtypes_match = original_traces.dtype == zarr_traces.dtype
    
    validation_results = {
        "traces_data_match": traces_match,
        "shapes_match": shapes_match,
        "dtypes_match": dtypes_match,
        "original_shape": original_traces.shape,
        "zarr_shape": zarr_traces.shape,
        "original_dtype": str(original_traces.dtype),
        "zarr_dtype": str(zarr_traces.dtype),
        "zarr_chunks": zarr_traces.chunks
    }
    
    return validation_results


# Additional convenience functions for Hugging Face Hub integration
def prepare_for_hub_upload():
    """Prepare dataset files for Hugging Face Hub upload."""
    print("Preparing dataset for Hugging Face Hub upload...")
    
    # Validate dataset integrity
    validation = validate_dataset_integrity()
    if not all([validation["traces_data_match"], validation["shapes_match"], validation["dtypes_match"]]):
        raise ValueError("Dataset validation failed!")
    
    # Get dataset info
    info = get_dataset_info()
    
    print("✅ Dataset validation passed")
    print(f"✅ Total examples: {info['total_examples']}")
    print(f"✅ Features: {list(info['features'].keys())}")
    print(f"✅ Zarr chunks: {validation['zarr_chunks']}")
    
    return {
        "validation": validation,
        "info": info,
        "ready_for_upload": True
    }


# Example usage
if __name__ == "__main__":
    # For local testing
    print("Loading dataset locally...")
    dataset = TestDataset()
    
    # Download and prepare the dataset first
    print("Downloading and preparing dataset...")
    dataset.download_and_prepare()
    
    # Build the train split (as_dataset returns a Dataset, not a dict, for a single split)
    train_dataset = dataset.as_dataset(split="train")
    
    print(f"Dataset size: {len(train_dataset)}")
    print(f"Features: {train_dataset.features}")
    
    # Show first example
    first_example = train_dataset[0]
    print(f"First example - Labels length: {len(first_example['labels'])}")
    print(f"First example - Traces length: {len(first_example['traces'])}")
    print(f"First example - Labels: {first_example['labels']}")
    print(f"First example - Index: {first_example['index']}")
    
    # Test zarr conversion
    print("\nTesting zarr conversion...")
    dl_manager = TestDownloadManager()
    traces_path = os.path.join(os.path.dirname(__file__), "data", "traces.npy")
    zarr_zip_path = dl_manager.download_zarr_chunks(traces_path, chunk_size=100)
    print(f"Zarr zip created at: {zarr_zip_path}")
    
    # Test loading from zarr zip
    test_dataset_zarr = TestDataset()
    zarr_array = test_dataset_zarr._load_zarr_from_zip(zarr_zip_path)
    print(f"Zarr array shape: {zarr_array.shape}")
    print(f"Zarr array dtype: {zarr_array.dtype}")
    print(f"Zarr array chunks: {zarr_array.chunks}")
    
    # Verify data integrity
    original_traces = np.load(traces_path)
    print(f"Data integrity check: {np.array_equal(original_traces, zarr_array[:])}")
    
    print("\n=== Dataset Utilities Test ===")
    
    # Test dataset info
    info = get_dataset_info()
    print(f"Dataset info: {info['total_examples']} examples")
    
    # Test validation
    validation = validate_dataset_integrity()
    print(f"Validation passed: {validation['traces_data_match']}")
    
    # Test data loader, reusing the zarr zip created above
    batch_gen = create_data_loader(zarr_zip_path, batch_size=16)
    first_batch = next(batch_gen())
    print(f"First batch shape: traces={first_batch['traces'].shape}, labels={first_batch['labels'].shape}")
    
    # Test hub preparation
    hub_status = prepare_for_hub_upload()
    print(f"Ready for Hub upload: {hub_status['ready_for_upload']}")
    
    print("\n✅ All utilities working correctly!")