"""
Tests for Batch Processing and GPU Path (Phase 3.5.5)
=====================================================

Uses unittest.mock to simulate GPU availability and multiprocessing behavior.
"""
|
| |
|
| | import unittest
|
| | from unittest.mock import MagicMock, patch
|
| | import numpy as np
|
| |
|
| | from mnemocore.core.batch_ops import BatchProcessor
|
| | from mnemocore.core.binary_hdv import BinaryHDV
|
| |
|
class TestBatchOps(unittest.TestCase):
    """Unit tests for BatchProcessor device selection, batch encoding, and search.

    GPU-dependent paths are exercised via mocks of the ``torch`` module so the
    suite runs on machines without CUDA/MPS hardware.
    """

    def setUp(self):
        # Small dimension keeps hypervector construction cheap in tests.
        self.dim = 16
        self.texts = ["hello world", "test memory"]

    def test_cpu_device_selection(self):
        """Verify fallback to CPU when GPU unavailable."""
        with patch("mnemocore.core.batch_ops.torch") as mock_torch:
            mock_torch.cuda.is_available.return_value = False
            mock_torch.backends.mps.is_available.return_value = False
            bp = BatchProcessor(use_gpu=True)
            self.assertEqual(bp.device, "cpu")

    def test_gpu_device_selection(self):
        """Verify selection of CUDA when available."""
        # TORCH_AVAILABLE must also be patched True, otherwise the module-level
        # import guard would short-circuit to CPU before probing CUDA.
        with patch("mnemocore.core.batch_ops.torch") as mock_torch, \
                patch("mnemocore.core.batch_ops.TORCH_AVAILABLE", True):
            mock_torch.cuda.is_available.return_value = True
            mock_torch.backends.mps.is_available.return_value = False
            bp = BatchProcessor(use_gpu=True)
            self.assertEqual(bp.device, "cuda")

    def test_encode_batch(self):
        """Test parallel CPU encoding logic."""
        bp = BatchProcessor(use_gpu=False, num_workers=1)

        results = bp.encode_batch(self.texts, dimension=self.dim)

        self.assertEqual(len(results), 2)
        self.assertIsInstance(results[0], BinaryHDV)
        self.assertEqual(results[0].dimension, self.dim)
        # Distinct inputs should encode to distinct hypervectors.
        self.assertNotEqual(results[0], results[1])

    def test_search_cpu(self):
        """Test search logic on CPU backend."""
        bp = BatchProcessor(use_gpu=False)

        q = BinaryHDV.random(self.dim)
        t1 = BinaryHDV.random(self.dim)
        t2 = q  # identical target: expected distance is exactly 0

        # Re-draw t1 until it differs from the query so the "> 0" distance
        # assertion below cannot flake on a random collision.
        while q == t1:
            t1 = BinaryHDV.random(self.dim)

        queries = [q]
        targets = [t1, t2]

        dists = bp.search_batch(queries, targets)

        self.assertEqual(dists.shape, (1, 2))
        self.assertEqual(dists[0, 1], 0)
        self.assertGreater(dists[0, 0], 0)

    @patch("mnemocore.core.batch_ops.torch")
    def test_search_gpu_mock(self, mock_torch):
        """Test GPU search logic flow (mocked tensor operations)."""
        mock_torch.cuda.is_available.return_value = True
        bp = BatchProcessor(use_gpu=True)
        # Force the GPU branch regardless of what the constructor selected.
        bp.device = "cuda"

        # Stub the host->device tensor transfers.
        q_mock = MagicMock()
        t_mock = MagicMock()
        mock_torch.from_numpy.side_effect = [q_mock, t_mock]
        q_mock.to.return_value = q_mock
        t_mock.to.return_value = t_mock

        # Stub the XOR + popcount-table lookup pipeline.
        xor_result = MagicMock()
        mock_torch.bitwise_xor.return_value = xor_result
        xor_result.long.return_value = "indices"

        bp.popcount_table_gpu = MagicMock()
        counts = MagicMock()
        bp.popcount_table_gpu.__getitem__.return_value = counts
        counts.sum.return_value = 123

        queries = [BinaryHDV.random(16)]
        targets = [BinaryHDV.random(16)]

        # Downstream tensor->numpy conversion on MagicMocks may raise; we only
        # care that the GPU code path was entered and the torch ops were hit,
        # so a failure past that point is deliberately ignored.
        try:
            bp.search_batch(queries, targets)
        except Exception:
            pass

        mock_torch.from_numpy.assert_called()
        mock_torch.bitwise_xor.assert_called()
|
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
| |
|