import os
import random
import shutil
import tempfile
import zipfile
from typing import List, Optional

import fsspec
import numpy as np
import zarr

import datasets
from datasets import DatasetInfo, DownloadManager, Features, Split, SplitGenerator, Value

class TestDownloadManager(datasets.DownloadManager):
    """Custom download manager that handles zarr chunks in zip format for streaming."""

    def __init__(self, dataset_name: str = "test", cache_dir: Optional[str] = None):
        # Initialize the parent without a cache_dir argument, since not
        # every version of DownloadManager accepts one
        super().__init__()
        self.dataset_name = dataset_name
        # Set cache_dir manually if provided, falling back to the system
        # temp directory otherwise
        if cache_dir:
            self.cache_dir = cache_dir
        elif not hasattr(self, "cache_dir") or self.cache_dir is None:
            self.cache_dir = tempfile.gettempdir()

    def download_zarr_chunks(self, traces_path: str, chunk_size: int = 100) -> str:
        """
        Convert traces.npy to a chunked zarr store and package it in a zip file.

        Returns the path to the zip file containing the zarr chunks.
        """
        # Load the original traces data
        traces = np.load(traces_path)
        # Create a temporary directory for the zarr store
        temp_dir = tempfile.mkdtemp()
        zarr_path = os.path.join(temp_dir, "traces.zarr")
        zip_path = os.path.join(temp_dir, "traces_zarr.zip")
        # Create a chunked zarr array (zarr v2 directory store),
        # chunking along the first (example) dimension
        chunks = (chunk_size, traces.shape[1])
        zarr_array = zarr.open(zarr_path, mode="w", shape=traces.shape,
                               chunks=chunks, dtype=traces.dtype)
        # Write the data chunk by chunk
        for i in range(0, traces.shape[0], chunk_size):
            end_idx = min(i + chunk_size, traces.shape[0])
            zarr_array[i:end_idx] = traces[i:end_idx]
        # Zip the zarr store, preserving its directory structure
        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
            for root, _dirs, files in os.walk(zarr_path):
                for file in files:
                    file_path = os.path.join(root, file)
                    arcname = os.path.relpath(file_path, temp_dir)
                    zipf.write(file_path, arcname)
        # Copy into the cache directory if missing or stale
        cache_path = os.path.join(self.cache_dir, f"{self.dataset_name}_traces_zarr.zip")
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)
        if not os.path.exists(cache_path) or os.path.getmtime(zip_path) > os.path.getmtime(cache_path):
            shutil.copy2(zip_path, cache_path)
        return cache_path
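

# Sketch (not part of the original script): a quick way to check what
# download_zarr_chunks actually wrote. The default traces path below is an
# assumption about the local checkout layout; pass the real location.
def example_inspect_zarr_zip(traces_path: str = "data/traces.npy") -> None:
    dl = TestDownloadManager(dataset_name="test")
    zip_path = dl.download_zarr_chunks(traces_path, chunk_size=100)
    with zipfile.ZipFile(zip_path) as zf:
        # Each entry under traces.zarr/ is one compressed chunk, plus the
        # .zarray metadata file describing shape, chunks, and dtype
        for name in zf.namelist()[:5]:
            print(name)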


class TestDataset(datasets.GeneratorBasedBuilder):
    """Custom dataset for DLSCA test data with streaming zarr support."""

    VERSION = datasets.Version("1.0.0")

    def _info(self) -> DatasetInfo:
        """Define the dataset information and features."""
        return DatasetInfo(
            description="DLSCA test dataset with streaming support for large traces",
            features=Features({
                "labels": datasets.Sequence(datasets.Value("int32"), length=4),
                "traces": datasets.Sequence(datasets.Value("int8"), length=20971),
                "index": Value("int32"),
            }),
            supervised_keys=("traces", "labels"),
            homepage="https://huggingface.co/datasets/DLSCA/test",
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
        """Define the data splits."""
        # Paths resolve relative to this script in both access modes
        data_dir = os.path.join(os.path.dirname(__file__), "data")
        labels_path = os.path.join(data_dir, "labels.npy")
        traces_path = os.path.join(data_dir, "traces.npy")
        if isinstance(dl_manager, TestDownloadManager):
            # For remote/cached access: convert and cache the zarr chunks
            zarr_zip_path = dl_manager.download_zarr_chunks(traces_path)
        else:
            # For local development: read the raw .npy directly
            zarr_zip_path = None
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "labels_path": labels_path,
                    "traces_path": traces_path,
                    "zarr_zip_path": zarr_zip_path,
                },
            ),
        ]

    def _generate_examples(self, labels_path: str, traces_path: str, zarr_zip_path: Optional[str] = None):
        """Generate examples from the dataset."""
        # Labels are small enough to load entirely into memory
        labels = np.load(labels_path)
        if zarr_zip_path and os.path.exists(zarr_zip_path):
            # Stream traces from the zarr store inside the zip
            traces_array = self._load_zarr_from_zip(zarr_zip_path)
        else:
            # Fall back to the raw numpy array for local development
            traces_array = np.load(traces_path)
        for idx in range(len(labels)):
            yield idx, {
                "labels": labels[idx],
                "traces": traces_array[idx],
                "index": idx,
            }

    def _load_zarr_from_zip(self, zip_path: str) -> zarr.Array:
        """Load a zarr array from a zip file with streaming support."""
        # fsspec's zip filesystem reads members lazily, so only the chunks
        # that are actually indexed get decompressed
        fs = fsspec.filesystem("zip", fo=zip_path)
        mapper = fs.get_mapper("traces.zarr")
        return zarr.open(mapper, mode="r")

    def _get_chunk_indices(self, start_idx: int, end_idx: int, chunk_size: int = 100) -> List[tuple]:
        """Helper method returning chunk-aligned (start, end) ranges for streaming access."""
        chunks = []
        current_idx = start_idx
        while current_idx < end_idx:
            chunk_start = (current_idx // chunk_size) * chunk_size
            chunk_end = min(chunk_start + chunk_size, end_idx)
            chunks.append((chunk_start, chunk_end))
            current_idx = chunk_end
        return chunks
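

# Sketch (not part of the original script): chunk-aligned streaming reads
# that combine _load_zarr_from_zip and _get_chunk_indices. Rows are pulled
# one chunk at a time, so memory use stays bounded by the chunk size.
def example_stream_chunks(zarr_zip_path: str) -> None:
    builder = TestDataset()
    arr = builder._load_zarr_from_zip(zarr_zip_path)
    # For rows 50..250 with chunk_size=100 this yields
    # [(0, 100), (100, 200), (200, 250)]
    for start, end in builder._get_chunk_indices(50, 250, chunk_size=100):
        block = arr[start:end]  # decompresses only the chunks touched
        print(start, end, block.shape)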


# Utility functions for dataset usage
def get_dataset_info():
    """Get static information about the dataset."""
    return {
        "description": "DLSCA test dataset with streaming support",
        "total_examples": 1000,
        "features": {
            "labels": {"shape": (4,), "dtype": "int32"},
            "traces": {"shape": (20971,), "dtype": "int8"},
            "index": {"dtype": "int32"},
        },
        "splits": ["train"],
        "size_info": {
            "labels_file": "~16KB",
            "traces_file": "~20MB",
            "zarr_chunks": "10 chunks of 100 examples each",
        },
    }


def create_data_loader(zarr_zip_path: str, batch_size: int = 32, shuffle: bool = True):
    """Create a batch generator over the zarr dataset."""
    dataset = TestDataset()
    zarr_array = dataset._load_zarr_from_zip(zarr_zip_path)
    labels = np.load(os.path.join(os.path.dirname(__file__), "data", "labels.npy"))

    # Simple batch generator; zarr supports numpy-style integer-array
    # (fancy) indexing, so shuffled batches can be read directly
    def batch_generator():
        indices = list(range(len(labels)))
        if shuffle:
            random.shuffle(indices)
        for i in range(0, len(indices), batch_size):
            batch_indices = indices[i:i + batch_size]
            batch_traces = zarr_array[batch_indices]
            batch_labels = labels[batch_indices]
            yield {
                "traces": batch_traces,
                "labels": batch_labels,
                "indices": batch_indices,
            }

    return batch_generator
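

# Sketch (not part of the original script): consuming the loader. Note that
# create_data_loader returns the generator *function*, so calling it starts
# a pass over the data, and calling it again starts a fresh (re-shuffled) epoch.
def example_iterate_batches(zarr_zip_path: str, n_batches: int = 2) -> None:
    batch_gen = create_data_loader(zarr_zip_path, batch_size=16)
    for step, batch in enumerate(batch_gen()):
        if step >= n_batches:
            break
        print(step, batch["traces"].shape, batch["labels"].shape)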


def validate_dataset_integrity():
    """Validate that the zarr conversion preserves data integrity."""
    # Load the original data
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    original_traces = np.load(os.path.join(data_dir, "traces.npy"))
    # Convert to zarr and load back
    dl_manager = TestDownloadManager()
    zarr_zip_path = dl_manager.download_zarr_chunks(os.path.join(data_dir, "traces.npy"))
    dataset = TestDataset()
    zarr_traces = dataset._load_zarr_from_zip(zarr_zip_path)
    # Validate data, shape, and dtype
    traces_match = np.array_equal(original_traces, zarr_traces[:])
    shapes_match = original_traces.shape == zarr_traces.shape
    dtypes_match = original_traces.dtype == zarr_traces.dtype
    return {
        "traces_data_match": traces_match,
        "shapes_match": shapes_match,
        "dtypes_match": dtypes_match,
        "original_shape": original_traces.shape,
        "zarr_shape": zarr_traces.shape,
        "original_dtype": str(original_traces.dtype),
        "zarr_dtype": str(zarr_traces.dtype),
        "zarr_chunks": zarr_traces.chunks,
    }
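

# Sketch (not part of the original script): for traces too large to
# materialize via zarr_traces[:], a spot check on random rows is a cheaper,
# though weaker, integrity test. The sample size of 32 is arbitrary.
def example_spot_check(original: np.ndarray, zarr_traces: zarr.Array, n_samples: int = 32) -> bool:
    rng = np.random.default_rng(0)
    rows = rng.choice(original.shape[0], size=n_samples, replace=False)
    return all(np.array_equal(original[r], zarr_traces[int(r)]) for r in rows)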


# Additional convenience functions for Hugging Face Hub integration
def prepare_for_hub_upload():
    """Prepare dataset files for Hugging Face Hub upload."""
    print("Preparing dataset for Hugging Face Hub upload...")
    # Validate dataset integrity
    validation = validate_dataset_integrity()
    if not all([validation["traces_data_match"], validation["shapes_match"], validation["dtypes_match"]]):
        raise ValueError("Dataset validation failed!")
    # Get dataset info
    info = get_dataset_info()
    print("✅ Dataset validation passed")
    print(f"✅ Total examples: {info['total_examples']}")
    print(f"✅ Features: {list(info['features'].keys())}")
    print(f"✅ Zarr chunks: {validation['zarr_chunks']}")
    return {
        "validation": validation,
        "info": info,
        "ready_for_upload": True,
    }
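

# Sketch (not part of the original script): the actual upload step, using
# huggingface_hub. The repo_id "DLSCA/test" matches the homepage declared in
# _info(); the path_in_repo layout is an assumption, not a fixed convention.
def example_upload_to_hub(zarr_zip_path: str) -> None:
    from huggingface_hub import HfApi  # requires `pip install huggingface_hub`
    status = prepare_for_hub_upload()
    if status["ready_for_upload"]:
        HfApi().upload_file(
            path_or_fileobj=zarr_zip_path,
            path_in_repo="data/traces_zarr.zip",  # assumed layout
            repo_id="DLSCA/test",
            repo_type="dataset",
        )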


# Example usage
if __name__ == "__main__":
    # For local testing
    print("Loading dataset locally...")
    dataset = TestDataset()
    # Download and prepare the dataset first
    print("Downloading and preparing dataset...")
    dataset.download_and_prepare()
    # Build the train split
    dataset_dict = dataset.as_dataset(split="train")
    print(f"Dataset size: {len(dataset_dict)}")
    print(f"Features: {dataset_dict.features}")
    # Show the first example
    first_example = dataset_dict[0]
    print(f"First example - Labels length: {len(first_example['labels'])}")
    print(f"First example - Traces length: {len(first_example['traces'])}")
    print(f"First example - Labels: {first_example['labels']}")
    print(f"First example - Index: {first_example['index']}")

    # Test zarr conversion
    print("\nTesting zarr conversion...")
    dl_manager = TestDownloadManager()
    traces_path = os.path.join(os.path.dirname(__file__), "data", "traces.npy")
    zarr_zip_path = dl_manager.download_zarr_chunks(traces_path, chunk_size=100)
    print(f"Zarr zip created at: {zarr_zip_path}")

    # Test loading from the zarr zip
    test_dataset_zarr = TestDataset()
    zarr_array = test_dataset_zarr._load_zarr_from_zip(zarr_zip_path)
    print(f"Zarr array shape: {zarr_array.shape}")
    print(f"Zarr array dtype: {zarr_array.dtype}")
    print(f"Zarr array chunks: {zarr_array.chunks}")

    # Verify data integrity
    original_traces = np.load(traces_path)
    print(f"Data integrity check: {np.array_equal(original_traces, zarr_array[:])}")

    print("\n=== Dataset Utilities Test ===")
    # Test dataset info
    info = get_dataset_info()
    print(f"Dataset info: {info['total_examples']} examples")
    # Test validation
    validation = validate_dataset_integrity()
    print(f"Validation passed: {validation['traces_data_match']}")
    # Test the data loader, reusing the zip converted above
    batch_gen = create_data_loader(zarr_zip_path, batch_size=16)
    first_batch = next(batch_gen())
    print(f"First batch shape: traces={first_batch['traces'].shape}, labels={first_batch['labels'].shape}")
    # Test hub preparation
    hub_status = prepare_for_hub_upload()
    print(f"Ready for Hub upload: {hub_status['ready_for_upload']}")

    print("\n✅ All utilities working correctly!")